prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio.util._testing import assert_data_frame_almost_equal
class MetadataMixinTests:
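    # Mixin: concrete test cases are expected to supply _metadata_constructor_
    # (returning an object exposing the metadata interface) as well as the
    # assertReallyEqual/assertReallyNotEqual helpers used below.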
def test_constructor_invalid_type(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
self._metadata_constructor_(metadata=md)
def test_constructor_no_metadata(self):
for md in None, {}:
obj = self._metadata_constructor_(metadata=md)
self.assertEqual(obj.metadata, {})
def test_constructor_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
self.assertEqual(obj.metadata, {'foo': 'bar'})
obj = self._metadata_constructor_(
metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})
def test_constructor_makes_shallow_copy_of_metadata(self):
md = {'foo': 'bar', 42: []}
obj = self._metadata_constructor_(metadata=md)
self.assertEqual(obj.metadata, md)
self.assertIsNot(obj.metadata, md)
md['foo'] = 'baz'
self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
md[42].append(True)
self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
def test_eq(self):
self.assertReallyEqual(
self._metadata_constructor_(metadata={'foo': 42}),
self._metadata_constructor_(metadata={'foo': 42}))
self.assertReallyEqual(
self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
self._metadata_constructor_(metadata={'foo': 42, 123: {}}))
def test_eq_missing_metadata(self):
self.assertReallyEqual(self._metadata_constructor_(),
self._metadata_constructor_())
self.assertReallyEqual(self._metadata_constructor_(),
self._metadata_constructor_(metadata={}))
self.assertReallyEqual(self._metadata_constructor_(metadata={}),
self._metadata_constructor_(metadata={}))
def test_ne(self):
# Both have metadata.
obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
self.assertReallyNotEqual(obj1, obj2)
# One has metadata.
obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
obj2 = self._metadata_constructor_()
self.assertReallyNotEqual(obj1, obj2)
def test_copy_metadata_none(self):
obj = self._metadata_constructor_()
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_copy_metadata_empty(self):
obj = self._metadata_constructor_(metadata={})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_copy_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': [1]})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {'foo': [1]})
self.assertEqual(obj_copy.metadata, {'foo': [1]})
self.assertIsNot(obj.metadata, obj_copy.metadata)
self.assertIs(obj.metadata['foo'], obj_copy.metadata['foo'])
obj_copy.metadata['foo'].append(2)
obj_copy.metadata['foo2'] = 42
self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(obj.metadata, {'foo': [1, 2]})
def test_deepcopy_metadata_none(self):
obj = self._metadata_constructor_()
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_deepcopy_metadata_empty(self):
obj = self._metadata_constructor_(metadata={})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_deepcopy_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': [1]})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {'foo': [1]})
self.assertEqual(obj_copy.metadata, {'foo': [1]})
self.assertIsNot(obj.metadata, obj_copy.metadata)
self.assertIsNot(obj.metadata['foo'], obj_copy.metadata['foo'])
obj_copy.metadata['foo'].append(2)
obj_copy.metadata['foo2'] = 42
self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(obj.metadata, {'foo': [1]})
def test_deepcopy_memo_is_respected(self):
# Basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls.
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
memo = {}
copy.deepcopy(obj, memo)
self.assertGreater(len(memo), 2)
def test_metadata_getter(self):
obj = self._metadata_constructor_(
metadata={42: 'foo', ('hello', 'world'): 43})
self.assertIsInstance(obj.metadata, dict)
self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
obj.metadata[42] = 'bar'
self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})
def test_metadata_getter_no_metadata(self):
obj = self._metadata_constructor_()
self.assertIsInstance(obj.metadata, dict)
self.assertEqual(obj.metadata, {})
def test_metadata_setter(self):
obj = self._metadata_constructor_()
self.assertEqual(obj.metadata, {})
obj.metadata = {'hello': 'world'}
self.assertEqual(obj.metadata, {'hello': 'world'})
obj.metadata = {}
self.assertEqual(obj.metadata, {})
def test_metadata_setter_makes_shallow_copy(self):
obj = self._metadata_constructor_()
md = {'foo': 'bar', 42: []}
obj.metadata = md
self.assertEqual(obj.metadata, md)
self.assertIsNot(obj.metadata, md)
md['foo'] = 'baz'
self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
md[42].append(True)
self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
def test_metadata_setter_invalid_type(self):
obj = self._metadata_constructor_(metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
obj.metadata = md
self.assertEqual(obj.metadata, {123: 456})
def test_metadata_deleter(self):
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
self.assertEqual(obj.metadata, {'foo': 'bar'})
del obj.metadata
self.assertEqual(obj.metadata, {})
# Delete again.
del obj.metadata
self.assertEqual(obj.metadata, {})
obj = self._metadata_constructor_()
self.assertEqual(obj.metadata, {})
del obj.metadata
self.assertEqual(obj.metadata, {})
def test_has_metadata(self):
obj = self._metadata_constructor_()
self.assertFalse(obj.has_metadata())
self.assertFalse(
self._metadata_constructor_(metadata={}).has_metadata())
self.assertTrue(
self._metadata_constructor_(metadata={'': ''}).has_metadata())
self.assertTrue(
self._metadata_constructor_(
metadata={'foo': 42}).has_metadata())
class PositionalMetadataMixinTests:
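    # Mixin: concrete test cases are expected to supply
    # _positional_metadata_constructor_(length, positional_metadata=None) plus
    # the same equality-assertion helpers as MetadataMixinTests.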
def test_constructor_invalid_positional_metadata_type(self):
with self.assertRaisesRegex(TypeError,
'Invalid positional metadata. Must be '
'consumable by `pd.DataFrame` constructor.'
' Original pandas error message: '):
self._positional_metadata_constructor_(0, positional_metadata=2)
def test_constructor_positional_metadata_len_mismatch(self):
# Zero elements.
        with self.assertRaisesRegex(ValueError, r'\(0\).*\(4\)'):
self._positional_metadata_constructor_(4, positional_metadata=[])
# Not enough elements.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=[2, 3, 4])
# Too many elements.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.Series(range(3)))
# Series too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.DataFrame({'quality': range(5)}))
# Empty DataFrame wrong size.
        with self.assertRaisesRegex(ValueError, r'\(2\).*\(3\)'):
self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(2)))
def test_constructor_no_positional_metadata(self):
# Length zero with missing/empty positional metadata.
for empty in None, {}, pd.DataFrame():
obj = self._positional_metadata_constructor_(
0, positional_metadata=empty)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# Nonzero length with missing positional metadata.
obj = self._positional_metadata_constructor_(
3, positional_metadata=None)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_constructor_with_positional_metadata_len_zero(self):
for data in [], (), np.array([]):
obj = self._positional_metadata_constructor_(
0, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(0)))
def test_constructor_with_positional_metadata_len_one(self):
for data in [2], (2, ), np.array([2]):
obj = self._positional_metadata_constructor_(
1, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(1)))
def test_constructor_with_positional_metadata_len_greater_than_one(self):
for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
obj = self._positional_metadata_constructor_(
9, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(9)))
def test_constructor_with_positional_metadata_multiple_columns(self):
obj = self._positional_metadata_constructor_(
5, positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_constructor_with_positional_metadata_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
obj = self._positional_metadata_constructor_(
5, positional_metadata=df)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_constructor_makes_shallow_copy_of_positional_metadata(self):
df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
obj = self._positional_metadata_constructor_(
3, positional_metadata=df)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
self.assertIsNot(obj.positional_metadata, df)
# Original df is not mutated.
orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
assert_data_frame_almost_equal(df, orig_df)
# Change values of column (using same dtype).
df['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Change single value of underlying data.
df.values[0][0] = 10
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Mutate list (not a deep copy).
df['bar'][0].append(42)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
index=np.arange(3)))
def test_eq_basic(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
self.assertReallyEqual(obj1, obj2)
def test_eq_from_different_source(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': np.array([1, 2, 3])})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
index=['foo', 'bar', 'baz']))
self.assertReallyEqual(obj1, obj2)
def test_eq_missing_positional_metadata(self):
for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
obj = self._positional_metadata_constructor_(
0, positional_metadata=empty)
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(0))
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(
0, positional_metadata=empty))
for empty in None, pd.DataFrame(index=['a', 'b']):
obj = self._positional_metadata_constructor_(
2, positional_metadata=empty)
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(2))
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(
2, positional_metadata=empty))
def test_ne_len_zero(self):
# Both have positional metadata.
obj1 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': []})
obj2 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': [], 'bar': []})
self.assertReallyNotEqual(obj1, obj2)
# One has positional metadata.
obj1 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': []})
obj2 = self._positional_metadata_constructor_(0)
self.assertReallyNotEqual(obj1, obj2)
def test_ne_len_greater_than_zero(self):
# Both have positional metadata.
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 2]})
self.assertReallyNotEqual(obj1, obj2)
# One has positional metadata.
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(3)
self.assertReallyNotEqual(obj1, obj2)
def test_ne_len_mismatch(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj2 = self._positional_metadata_constructor_(
2, positional_metadata=pd.DataFrame(index=range(2)))
self.assertReallyNotEqual(obj1, obj2)
def test_copy_positional_metadata_none(self):
obj = self._positional_metadata_constructor_(3)
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_copy_positional_metadata_empty(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_copy_with_positional_metadata(self):
obj = self._positional_metadata_constructor_(
4, positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
self.assertIsNot(obj.positional_metadata,
obj_copy.positional_metadata)
self.assertIsNot(obj.positional_metadata.values,
obj_copy.positional_metadata.values)
self.assertIs(obj.positional_metadata.loc[0, 'bar'],
obj_copy.positional_metadata.loc[0, 'bar'])
obj_copy.positional_metadata.loc[0, 'bar'].append(1)
obj_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_positional_metadata_none(self):
obj = self._positional_metadata_constructor_(3)
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_deepcopy_positional_metadata_empty(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_deepcopy_with_positional_metadata(self):
obj = self._positional_metadata_constructor_(
4, positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
self.assertIsNot(obj.positional_metadata.values,
obj_copy.positional_metadata.values)
self.assertIsNot(obj.positional_metadata.loc[0, 'bar'],
obj_copy.positional_metadata.loc[0, 'bar'])
obj_copy.positional_metadata.loc[0, 'bar'].append(1)
obj_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_memo_is_respected(self):
# Basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls.
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
memo = {}
copy.deepcopy(obj, memo)
self.assertGreater(len(memo), 2)
def test_positional_metadata_getter(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [22, 22, 0]})
self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
assert_data_frame_almost_equal(obj.positional_metadata,
| pd.DataFrame({'foo': [22, 22, 0]}) | pandas.DataFrame |
import json
import pandas as pd
#___________________________
def load_tmdb_movies(path):
df = pd.read_csv(path)
df['release_date'] = pd.to_datetime(df['release_date']).apply(lambda x: x.date())
json_columns = ['genres', 'keywords', 'production_countries',
'production_companies', 'spoken_languages']
for column in json_columns:
df[column] = df[column].apply(json.loads)
return df
#___________________________
def load_tmdb_credits(path):
df = pd.read_csv(path)
json_columns = ['cast', 'crew']
for column in json_columns:
df[column] = df[column].apply(json.loads)
return df
#___________________
LOST_COLUMNS = [
'actor_1_facebook_likes',
'actor_2_facebook_likes',
'actor_3_facebook_likes',
'aspect_ratio',
'cast_total_facebook_likes',
'color',
'content_rating',
'director_facebook_likes',
'facenumber_in_poster',
'movie_facebook_likes',
'movie_imdb_link',
'num_critic_for_reviews',
'num_user_for_reviews']
#____________________________________
TMDB_TO_IMDB_SIMPLE_EQUIVALENCIES = {
'budget': 'budget',
'genres': 'genres',
'revenue': 'gross',
'title': 'movie_title',
'runtime': 'duration',
'original_language': 'language',
'keywords': 'plot_keywords',
'vote_count': 'num_voted_users'}
#_____________________________________________________
IMDB_COLUMNS_TO_REMAP = {'imdb_score': 'vote_average'}
#_____________________________________________________
def safe_access(container, index_values):
# return missing value rather than an error upon indexing/key failure
result = container
try:
for idx in index_values:
result = result[idx]
return result
    except (IndexError, KeyError):
        return np.nan  # numpy is imported (as np) further down, before this is called
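# Usage sketch (not part of the original kernel): safe_access walks a nested
# container and returns NaN instead of raising when an index/key is missing,
# e.g. for a hypothetical production_countries value after json.loads:
#   safe_access([{'iso_3166_1': 'US', 'name': 'United States of America'}], [0, 'name'])
#   -> 'United States of America', while safe_access([], [0, 'name']) -> nan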
#_____________________________________________________
def get_director(crew_data):
directors = [x['name'] for x in crew_data if x['job'] == 'Director']
return safe_access(directors, [0])
#_____________________________________________________
def pipe_flatten_names(keywords):
return '|'.join([x['name'] for x in keywords])
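# e.g. pipe_flatten_names([{'id': 28, 'name': 'Action'}, {'id': 18, 'name': 'Drama'}])
# returns 'Action|Drama' (illustrative values shaped like the TMDB json columns)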
#_____________________________________________________
def convert_to_original_format(movies, credits):
tmdb_movies = movies.copy()
tmdb_movies.rename(columns=TMDB_TO_IMDB_SIMPLE_EQUIVALENCIES, inplace=True)
tmdb_movies['title_year'] = pd.to_datetime(tmdb_movies['release_date']).apply(lambda x: x.year)
# I'm assuming that the first production country is equivalent, but have not been able to validate this
tmdb_movies['country'] = tmdb_movies['production_countries'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['language'] = tmdb_movies['spoken_languages'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['director_name'] = credits['crew'].apply(get_director)
tmdb_movies['actor_1_name'] = credits['cast'].apply(lambda x: safe_access(x, [1, 'name']))
tmdb_movies['actor_2_name'] = credits['cast'].apply(lambda x: safe_access(x, [2, 'name']))
tmdb_movies['actor_3_name'] = credits['cast'].apply(lambda x: safe_access(x, [3, 'name']))
tmdb_movies['genres'] = tmdb_movies['genres'].apply(pipe_flatten_names)
tmdb_movies['plot_keywords'] = tmdb_movies['plot_keywords'].apply(pipe_flatten_names)
return tmdb_movies
# from subprocess import check_output
# print(check_output(["ls", "E:/mlearn/input/tmdb-movie-metadata/"]).decode("utf8"))
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import math, nltk, warnings
from nltk.corpus import wordnet
from sklearn import linear_model
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import fuzz
from wordcloud import WordCloud, STOPWORDS
plt.rcParams["patch.force_edgecolor"] = True
plt.style.use('fivethirtyeight')
mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "last_expr"
pd.options.display.max_columns = 50
# %matplotlib inline
warnings.filterwarnings('ignore')
PS = nltk.stem.PorterStemmer()
#__________________
# load the dataset
credits = load_tmdb_credits("./input/tmdb-movie-metadata/tmdb_5000_credits.csv")
movies = load_tmdb_movies("./input/tmdb-movie-metadata/tmdb_5000_movies.csv")
df_initial = convert_to_original_format(movies, credits)
print('Shape:',df_initial.shape)
#__________________________________________
# info on variable types and filling factor
tab_info=pd.DataFrame(df_initial.dtypes).T.rename(index={0:'column type'})
tab_info=tab_info.append(pd.DataFrame(df_initial.isnull().sum()).T.rename(index={0:'null values'}))
tab_info=tab_info.append(pd.DataFrame(df_initial.isnull().sum()/df_initial.shape[0]*100).T.rename(index={0:'null values (%)'}))
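# Note (not in the original notebook): DataFrame.append is deprecated in recent
# pandas (removed in 2.0); the same summary table can be built with pd.concat:
# tab_info = pd.concat([
#     pd.DataFrame(df_initial.dtypes).T.rename(index={0: 'column type'}),
#     pd.DataFrame(df_initial.isnull().sum()).T.rename(index={0: 'null values'}),
#     pd.DataFrame(df_initial.isnull().sum() / df_initial.shape[0] * 100).T.rename(index={0: 'null values (%)'}),
# ])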
print(tab_info)
# collect the full set of keywords
set_keywords = set()
for liste_keywords in df_initial['plot_keywords'].str.split('|').values:
if isinstance(liste_keywords, float): continue # only happen if liste_keywords = NaN
set_keywords = set_keywords.union(liste_keywords)
#_________________________
# remove the empty-string entry
set_keywords.remove('')
print(1)
def count_word(df, ref_col, liste):
keyword_count = dict()
for s in liste: keyword_count[s] = 0
for liste_keywords in df[ref_col].str.split('|'):
if type(liste_keywords) == float and | pd.isnull(liste_keywords) | pandas.isnull |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 17:00:53 2017
@author: Dan
"""
# Data cleaning
# web crawler
import pandas as pd
stockcodeurl = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=2'
rawdata = pd.read_html(stockcodeurl)[0]
# parse stock codes and names
stockcode=[]
stockname=[]
for i in range(2, 902, 1):
try:
raw = rawdata.iloc[i,0].split('\u3000')
#print(raw)
stockcode.append(rawdata.iloc[i,0].split('\u3000')[0])
stockname.append(rawdata.iloc[i,0].split('\u3000')[1])
except:
stockname.append('全宇生技-KY')
stockcode[651] = str.split('4148 全宇生技-KY')[0]
# parse listing dates (TWSE data starts from 1992, i.e. ROC year 81)
start = rawdata.iloc[2:902, 2]
yearstart =[]
monthstart =[]
for i in range(2, 902, 1):
year = start[i].split('/')[0]
month = start[i].split('/')[1]
yearstart.append(year)
monthstart.append(month)
newyearstart = []
newmonthstart = []
for i in range( 0, len(yearstart), 1):
a = int(yearstart[i])
if a <= 1992 :
newyearstart.append(1992)
newmonthstart.append('01')
else:
newyearstart.append(a)
newmonthstart.append(monthstart[i])
# Data processing
# 1 Classify by listing date: a. listed before 1992  b. listed in/after 1992
# 2 Listed before 1992:
#    step 1: fetch data for every full year before the current year
#    step 2: fetch data for the current year
# 3 Listed in/after 1992:
#    step 1: fetch data for the listing year
#    step 2: fetch data for the full years up to the current year
#    step 3: fetch data for the current year
import datetime
urllist =[]
month = ['01','02','03','04','05','06','07','08','09','10','11','12']
thisyear = 2017
thismonth = datetime.datetime.now().month  # current calendar month (1-12)
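# Sketch (not part of the original script) of the monthly TWSE quote URL that
# getdata()/getdata2() below assemble, one request per stock per month; `mm`
# is a zero-padded month string taken from the `month` list above.
def twse_month_url(stock_id, year, mm):
    # e.g. twse_month_url('2330', 1992, '01') ->
    # http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date=19920101&stockNo=2330
    return ('http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='
            + str(year) + mm + '01' + '&stockNo=' + stock_id)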
#getdata('2330')
def getdata(id):
position = stockcode.index(id)
tmp0 = newmonthstart[position]
tmp1 = newyearstart[position]
if tmp0 == '01' and tmp1 == 1992 :
for y in range(tmp1, thisyear, 1):
for m1 in range(0, len(month), 1):
url = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(y) + str(month[m1]) + '01' + '&stockNo=' + id
urllist.append(url)
for m2 in range(0, thismonth, 1):
url2 = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(thisyear) + str(month[m2]) + '01' + '&stockNo=' + id
urllist.append(url2)
date = []
volumn=[]
op = []
close = []
high = []
low =[]
for l in range(0, len(urllist),1):
df = pd.read_html(urllist[l])[0]
for k in range(0, len(df.index),1):
time = df.iloc[k,0]
time = str(int(time.split('/')[0])+1911) + '-'+ time.split('/')[1] +'-'+ time.split('/')[2]
date.append(time)
volumn.append(df.iloc[k,1])
op.append(df.iloc[k,3])
high.append(df.iloc[k,4])
low.append(df.iloc[k,5])
close.append(df.iloc[k,6])
id = pd.DataFrame({'open':op,'close':close, 'high':high, 'low':low, 'volumn':volumn}, index= date)
print(id)
return(id)
else:
for m1 in range(int(tmp0)-1, 12, 1):
url = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(tmp1) + str(month[m1]) + '01' + '&stockNo=' + id
urllist.append(url)
for y in range(tmp1+1, thisyear, 1):
for m2 in range(0, len(month), 1):
url2 = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(y) + str(month[m2]) + '01' + '&stockNo=' + id
urllist.append(url2)
for m3 in range(0, thismonth, 1):
url3 = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(thisyear) + str(month[m3]) + '01' + '&stockNo=' + id
urllist.append(url3)
date = []
volumn=[]
op = []
close = []
high = []
low =[]
for l in range(0, len(urllist),1):
df = pd.read_html(urllist[l])[0]
for k in range(0, len(df.index),1):
time = df.iloc[k,0]
time = str(int(time.split('/')[0])+1911) + '-'+ time.split('/')[1] +'-'+ time.split('/')[2]
date.append(time)
volumn.append(df.iloc[k,1])
op.append(df.iloc[k,3])
high.append(df.iloc[k,4])
low.append(df.iloc[k,5])
close.append(df.iloc[k,6])
id = pd.DataFrame({'open':op,'close':close, 'high':high, 'low':low, 'volumn':volumn}, index= date)
print(id)
return(id)
# function input: getdata2(id, begin_yyyy, begin_m, end_yyyy, end_m), e.g. getdata2('1101', 2010, 1, 2013, 5)
def getdata2(id, by, bm, ey, em):
position = stockcode.index(id)
tmp0 = newmonthstart[position]
tmp1 = newyearstart[position]
    if int(by) < tmp1:
        print('requested period is outside the available data range')
    elif int(by) == tmp1 and int(bm) < int(tmp0):
        print('requested period is outside the available data range')
    elif int(ey) > thisyear:
        print('requested period is outside the available data range')
    elif int(ey) == thisyear and int(em) > thismonth:
        print('requested period is outside the available data range')
else:
for m1 in range(int(bm)-1, 12, 1):
url = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(by) + str(month[m1]) + '01' + '&stockNo=' + id
urllist.append(url)
for y in range(by+1, ey+1, 1):
for m2 in range(0, len(month), 1):
url2 = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(y) + str(month[m2]) + '01' + '&stockNo=' + id
urllist.append(url2)
for m3 in range(0, int(em), 1):
url3 = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=html&date='+ str(ey) + str(month[m3]) + '01' + '&stockNo=' + id
urllist.append(url3)
date = []
volumn=[]
op = []
close = []
high = []
low =[]
for l in range(0, len(urllist),1):
df = | pd.read_html(urllist[l]) | pandas.read_html |
import os
import numpy as np
import pandas as pd
from torch.nn import MSELoss
from easydict import EasyDict as edict
from readability_transformers import ReadabilityTransformer
from readability_transformers.readers import PairwiseDataReader, PredictionDataReader
from readability_transformers.dataset import CommonLitDataset
from readability_transformers.losses import WeightedRankingMSELoss
from readability_transformers.file_utils import load_from_cache_pickle
TEST_CSV_PATH = 'readability_transformers/dataset/data/test.csv'
OUTPUT_CSV_PATH = './'
def get_test_df():
commonlit_data = CommonLitDataset("test")
return commonlit_data.data
def inference_on_dataset():
model = ReadabilityTransformer(
"checkpoints/dump/v16_regression_6",
device="cuda:0",
double=True
)
test_df = get_test_df()
ids = test_df["id"].values
passages = test_df["excerpt"].values
predictions = model.predict(passages, batch_size=3)
    # making the CommonLit submission
submission = []
for one in zip(ids, predictions.tolist()):
one_id, one_prediction = one
one_submit = {
"id": one_id,
"target": one_prediction
}
submission.append(one_submit)
submission = pd.DataFrame(submission)
submission_path = os.path.join(OUTPUT_CSV_PATH, "submission.csv")
submission.to_csv(submission_path, index=False)
def inference_on_valid_split():
model = ReadabilityTransformer(
"checkpoints/dump/prediction_2",
device="cuda:0",
double=True
)
valid_df = load_from_cache_pickle("preapply", "features_valid_v1.5_lf_trf")
train_df = load_from_cache_pickle("preapply", "features_train_v1.5_lf_trf")
valid_idx = set(list(valid_df.index))
train_idx = set(list(train_df.index))
intersect = train_idx.intersection(valid_idx)
print("intersect", intersect)
print(valid_df.sample(n=3).values)
print(train_df.sample(n=3).values)
# valid_easiest_idx = valid_df["target"].idxmax()
# print("valid_easiest_idx", valid_easiest_idx)
# print(valid_df.loc[valid_easiest_idx].values)
# train_easiest_idx = train_df["target"].idxmax()
# print(train_df.loc[train_easiest_idx].values)
result_df = | pd.read_csv("checkpoints/dump/prediction_2/1_Prediction/evaluation_results.csv") | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
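    # Pattern used throughout this suite: build a pandas DataFrame and its
    # pandas-on-Spark counterpart, apply the same operation to both, and
    # compare the results with assert_eq.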
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
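# Backticks in selectExpr keep `a.b` as one dotted column name rather than a struct
# field access, so selecting it back should yield a Series named "a.b".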
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
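# A tuple of aggregation names is rejected; a list (or dict) must be used instead.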
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
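# A bare tuple is interpreted as a list of level keys, so tuple level names
# must be wrapped in a list to be treated as a single level.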
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert that at least one of 'labels' or 'columns' must be specified
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
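# compute.isin_limit caps how many values Column.isin takes; lowering it below the
# number of labels should exercise the alternative join-based path for the same drop.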
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
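# Transposed so that "x"/"y"/"z" become row labels; _test_dropna's subsets then
# refer to rows when dropping columns (axis=1).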
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
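# approx=True uses an approximate distinct count under the hood, so the estimate
# (103 here) can deviate from the exact 100; a smaller rsd (relative standard
# deviation) tightens it back to the exact value.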
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert that axis values other than 0/"index" are not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
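# copy=False is not supported, and -1 is not accepted as an axis here.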
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
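# Every function/property that _MissingPandasLikeDataFrame marks as unsupported or
# deprecated should raise PandasNotImplementedError with a message naming it.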
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# pandas seems to have a bug when an `np.array` is passed as the parameter, so compare
# against the list-based pandas result instead
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
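# pandas changed how isin treats None/NaN in the values list around 1.2; for older
# pandas, compare against a hand-built frame that matches the pandas-on-Spark result.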
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
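# Row order is not guaranteed after a distributed merge, so the helper sorts both
# results by all columns and resets the index before comparing.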
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert appending DataFrames with non-matching columns works
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert appending multi-index DataFrames whose index values do not overlap
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
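# Combining two separate DataFrames is disallowed by default; it requires enabling
# the compute.ops_on_diff_frames option, which is not enabled here.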
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the results because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip columns comparison by reset_index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what if with random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
"""
Name: FeatureServerExtraction
Purpose: Takes an image as input and provides the resultant category
"""
import cv2
import pickle
import pandas as pd
import numpy as np
import scipy
import pytesseract
from scipy import stats
MODEL_PATH = 'static/adroast_model.sav'
def main():
extract_feature('52712850_2504348282972736_2536715282538299392_n.png')
def extract_feature(filepath):
# cv2.imread expects an imread flag (e.g. cv2.IMREAD_COLOR), not a color-conversion
# code, so read the image first and then convert OpenCV's default BGR order to RGB.
ad_image = cv2.imread(filepath)
ad_image = cv2.cvtColor(ad_image, cv2.COLOR_BGR2RGB)
feature_set = {}
feature_set['colorfullness'] = image_colorfulness(ad_image)
feature_set['edges'] = harris_corner_detection(ad_image)
feature_set['text_len'] = text_len(ad_image)
feature_set['word_len'] = word_len(ad_image)
feature_analysis = rgb_hist_analysis(ad_image)
feature_set['r_mean'] = feature_analysis[0]
feature_set['r_variance'] = feature_analysis[1]
feature_set['r_kurtosis'] = feature_analysis[2]
feature_set['r_skewness'] = feature_analysis[3]
feature_set['g_mean'] = feature_analysis[4]
feature_set['g_variance'] = feature_analysis[5]
feature_set['g_kurtosis'] = feature_analysis[6]
feature_set['g_skewness'] = feature_analysis[7]
feature_set['b_mean'] = feature_analysis[8]
feature_set['b_variance'] = feature_analysis[9]
feature_set['b_kurtosis'] = feature_analysis[10]
feature_set['b_skewness'] = feature_analysis[11]
improvements = top_improvements(feature_set)
prediction_features = pd.DataFrame(feature_set, index=[0])
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 08:11:50 2021
@author: <NAME>
"""
import os
from pickle import load, dump
import subprocess
from time import time, sleep
from shutil import rmtree
import numpy as np
import pandas as pd
from reificationFusion import model_reification
import concurrent.futures
from multiprocessing import cpu_count
from copy import deepcopy
from util import cartesian, call_model, apply_constraints
from util import calculate_KG, calculate_EI, fused_calculate, calculate_TS, calculate_Greedy, calculate_PI, calculate_UCB
from util import calculate_GPHedge, evaluateFusedModel, batchAcquisitionFunc, kmedoids_max
from util import fused_EHVI, calculate_EHVI, Pareto_finder, storeObject
from gpModel import gp_model, bmarsModel
from sklearn_extra.cluster import KMedoids
import logging
from pyDOE import lhs
import matplotlib.pyplot as plt
import concurrent.futures
def Pool():
return concurrent.futures.ThreadPoolExecutor(8)
# from ray.util.multiprocessing import Pool
class barefoot():
def __init__(self, ROMModelList=[], TruthModel=[], calcInitData=True,
initDataPathorNum=[], multiNode=0, workingDir=".",
calculationName="Calculation", nDim=1, input_resolution=5, restore_calc=False,
updateROMafterTM=False, externalTM=False, acquisitionFunc="KG",
A=[], b=[], Aeq=[], beq=[], lb=[], ub=[], func=[], keepSubRunning=True,
verbose=False, sampleScheme="LHS", tmSampleOpt="Greedy", logname="BAREFOOT",
maximize=True, train_func=[], reification=True, batch=True,
multiObjective=False, multiObjectRef=[], surrogate="GP", externalROM=False,
temp_input=[]):
self.temp_input = temp_input
"""
Python Class for Batch Reification/Fusion Optimization (BAREFOOT) Framework Calculations
Parameters
----------
ROMModelList : This is the list of functions that are the cheap information sources.
These need to be in a form that ensures that by providing the unit hypercube
input, the function will provide the required output
TruthModel : This is the Truth model, or the function that needs to be optimized.
calcInitData : This variable controls whether the initial data is calculated for
each of the models or is retrieved from a file
initDataPathorNum : This variable holds the number of initial datapoints to evaluate for each
information source (including the Truth Model), or, when initial data is
loaded from a file, holds the path to the initial data file
multiNode : This variable reflects the number of subprocesses that will be used
for the calculations. A value of zero indicates all calculations will
be completed on the main compute node.
workingDir : This is the path to the working directory. In some cases it may be desirable
to store data separately from the code, this will allow the data to be stored
in alternate locations. Can also be used if the relative directory reference
is not working correctly.
calculationName : This is the name for the calculation and will change the results directory name
nDim : The number of dimensions for the input space that will be used
restore_calc : This parameter toggles whether the framework data is set up from the information
provided or retrieved from a save_state file. This can be used to restart a calculation
updateROMafterTM : This parameter allows the reduced order models to be retrained after getting more data
from the Truth Model. The model function calls do not change, so the training needs to
reflect in the same function. Requires a training function to be supplied in the
train_func input.
train_func : Training function used to retrain the reduced order models after the Truth Model
evaluations.
externalTM : In cases where it is necessary to evaluate the Truth Model separate to the
framework (for example, if the Truth Model is an actual experiment), this toggles
the output of the predicted points to a separate file for use externally. The
framework is shut down after the data is output, see test examples for how to restart
the framework after the external Truth Model has been evaluated
acquisitionFunc : The acquisition function used to evaluate the next-best points for the reduced
order models. Current options are "KG" (Knowledge Gradient), "EI" (Expected
Improvement), "PI" (Probability of Improvement), "TS" (Thompson Sampling),
"Greedy" (greedy sampling), "UCB" (Upper Confidence Bound) and "Hedge"
(GP-Hedge portfolio optimization).
A, b, Aeq, beq : Inequality and equality constraints according to the following equations:
1) A*x <= b
2) Aeq*x == beq
ub, lb : Upper bounds and lower bounds for inputs, all inputs must receive a value
(Specify 0 for lb and 1 for ub if there is no bound for that input)
func : Constraint function; must take the input matrix (x) and return a boolean vector
with one entry per sample (row) in x.
keepSubRunning : Determines whether the subprocesses are left running while calling the Truth Model
verbose : Determines the logging level for tracking the calculations.
input_resolution : How many decimal places to use in the inputs.
sampleScheme : Sampling scheme for the test points. Options are "Grid", "LHS", "Custom", "CompFunc".
Where the Custom uses preselected test points from a file, and the CompFunc is
specifically designed for sampling composition spaces.
tmSampleOpt : The acquisition function to use when evaluating next-best points for the Truth Model
logname : The name of the log file
maximize : Toggles whether the problem is a maximization or a minimization problem. The default is maximization.
reification : Toggles the use of the multi-fidelity Reification approach
batch : Toggles the use of the Batch BO approach
multiObjective : Toggles multi-objective optimization
multiObjectRef : Holds the reference point required by the EHVI acquisition function
"""
if verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# create logger to output framework progress
self.logger = logging.getLogger(logname)
for h in self.logger.handlers:
self.logger.removeHandler(h)
self.logger.setLevel(log_level)
fh = logging.FileHandler('{}.log'.format(logname))
sh = logging.StreamHandler()
fh.setLevel(log_level)
sh.setLevel(log_level)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
# add the handler to the logger
self.logger.addHandler(fh)
self.logger.addHandler(sh)
if not restore_calc:
with open(f"{logname}.log", 'w') as f:
pass
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Start BAREFOOT Framework Initialization #")
self.logger.info("# #")
self.logger.info("#########################################################")
self.logger.info("*********************************************************")
self.logger.info("* Calculation Name: {} ".format(calculationName))
self.logger.info("*********************************************************")
# Restore a previous calculation and restart the timer or load new
# information and initialize
if restore_calc:
if externalTM:
self.__external_TM_data_load(workingDir, calculationName)
else:
self.__load_from_save(workingDir, calculationName)
self.restore_calc = restore_calc
self.pool = Pool()
self.timeCheck = time()
self.logger.info("Previous Save State Restored")
else:
self.restore_calc = restore_calc
self.pool = Pool()
self.timeCheck = time()
self.multiObjective = multiObjective
self.MORef = multiObjectRef
self.ROM = ROMModelList
self.TM = TruthModel
self.TMInitInput = []
self.TMInitOutput = []
self.ROMInitInput = []
self.ROMInitOutput = []
self.inputLabels = []
self.multinode = multiNode
self.workingDir = workingDir
self.calculationName = calculationName
self.calcInitData = calcInitData
self.initDataPathorNum = initDataPathorNum
self.currentIteration = -1
self.maximize = maximize
self.surrogate = surrogate
self.updateROMafterTM = updateROMafterTM
self.reification = reification
self.batch = batch
self.externalTM = externalTM
self.externalROM = externalROM
if self.multiObjective:
self.tmSampleOpt = "EHVI"
self.acquisitionFunc = "EHVI"
self.logger.warning("Default multiobjective acquisition function (EHVI) selected!")
elif self.surrogate == "BMARS":
self.acquisitionFunc = "EI-BMARS"
self.tmSampleOpt = "EI-BMARS"
self.logger.warning("BMARS Surrogate Model selected! Default EI for BMARS acquisition function selected!")
self.reification = False
self.logger.warning("BMARS Surrogate Model not compatible with Reification! Reification approach disabled!")
else:
if tmSampleOpt in ["Hedge", "Greedy", "EI", "KG", "TS", "PI", "UCB"]:
self.tmSampleOpt = tmSampleOpt
else:
self.tmSampleOpt = "Greedy"
self.logger.warning("Invalid Truth Model Acquisition Function! Using default (Greedy).")
if acquisitionFunc in ["Hedge", "Greedy", "EI", "KG", "TS", "PI", "UCB"]:
self.acquisitionFunc = acquisitionFunc
else:
self.acquisitionFunc = "KG"
self.logger.warning("Invalid ROM Acquisition Function! Using default (KG).")
self.nDim = nDim
self.res = input_resolution
self.A = A
self.b = b
self.Aeq = Aeq
self.beq = beq
self.ub = ub
self.lb = lb
self.constr_func = func
self.train_func = train_func
if sampleScheme in ["LHS", "Grid", "Custom", "CompFunc"]:
self.sampleScheme = sampleScheme
else:
self.sampleScheme = "LHS"
self.logger.warning("Invalid Sample Scheme! Using default (LHS).")
if self.multinode != 0:
self.keepSubRunning = keepSubRunning
else:
self.keepSubRunning = True
self.__create_dir_and_files()
self.__create_output_dataframes()
self.__get_initial_data__()
self.logger.info("Initialization Completed")
def __catch_error(func):
"""
If an error occurs during the initialization of the framework this decorator will catch
that error
"""
def close_subs(self, *args, **kwargs):
no_error = False
try:
func(self, *args, **kwargs)
no_error = True
except Exception as err:
self.logger.critical("Initialization Code Failed - See Error Below")
self.logger.exception(err)
return no_error
return close_subs
def __create_dir_and_files(self):
# Create the required directories for saving the results and the subprocess
# information if applicable
try:
os.mkdir('{}/results'.format(self.workingDir))
self.logger.debug("Results Directory Created Successfully")
except FileExistsError:
self.logger.debug("Results Directory Already Exists")
try:
os.mkdir('{}/data'.format(self.workingDir))
self.logger.debug("Data Directory Created Successfully")
except FileExistsError:
self.logger.debug("Data Directory Already Exists")
try:
os.mkdir('{}/data/parameterSets'.format(self.workingDir))
self.logger.debug("Parameter Set Directory Created Successfully")
except FileExistsError:
self.logger.debug("Parameter Set Directory Already Exists")
try:
os.mkdir('{}/results/{}'.format(self.workingDir,
self.calculationName))
self.logger.debug("Calculation Results Directory [{}] Created Successfully".format(self.calculationName))
except FileExistsError:
self.logger.debug("Calculation Results Directory [{}] Already Exists".format(self.calculationName))
# If using subprocesses, create the folder structure needed
if self.multinode != 0:
with open("BAREFOOT.log", 'w') as f:
pass
if os.path.exists('{}/subprocess'.format(self.workingDir)):
rmtree('{}/subprocess'.format(self.workingDir))
self.logger.debug("Existing Subprocess Directory Removed")
os.mkdir('{}/subprocess'.format(self.workingDir))
os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))
self.logger.debug("Subprocess Directory Created")
def __create_output_dataframes(self):
# The output of the framework is contained in two pandas dataframes
# the iterationData df shows the iterations, model calls and maximum
# value found
if self.multiObjective:
labels2 = ["Iteration", "Calculation Time", "Objective 1", "Objective 2", "Truth Model"]
else:
labels2 = ["Iteration", "Max Found", "Calculation Time", "Truth Model"]
for ii in range(len(self.ROM)):
labels2.append("ROM {}".format(ii))
# the evaluatedPoints df contains all the points that have been
# evaluated from all models
if self.multiObjective:
labels1 = ["Model Index", "Iteration", "y1", "y2"]
else:
labels1 = ["Model Index", "Iteration", "y"]
for ii in range(self.nDim):
labels1.append("x{}".format(ii))
if self.multiObjective:
labels2.append("x{}".format(ii))
self.inputLabels.append("x{}".format(ii))
self.evaluatedPoints = pd.DataFrame(columns=labels1)
self.iterationData = pd.DataFrame(columns=labels2)
self.logger.debug("Output Dataframes Created")
def __save_output_dataframes(self):
# Plot a live comparison of the current optimization trace against previously saved
# Reification-only and Batch Bayesian Optimization-only results, which are loaded
# from reificationOnlyResults.pickle and BBOOnlyResults.pickle below.
fig,ax = plt.subplots(1,2,figsize=(10,5))
ax[0].set_xlabel('RVE Evaluations')
ax[0].set_ylabel(r'$1/\sigma(d\sigma/d\epsilon_{pl})$')
ax[0].set_xlim(0,20)
ax[0].set_xticks([0,2,4,6,8,10,12,14,16,18,20])
ax[0].set_ylim(0,35)
ax[1].set_xlabel('Iteration')
ax[1].set_ylabel('Model Evaluations')
ax[1].set_xlim(0,20)
ax[1].set_ylim(0,100)
def pltsin(ax, fig, x, y, lbls):
if ax.lines:
ii = 0
for line in ax.lines:
line.set_xdata(x[ii])
line.set_ydata(y[ii])
ii += 1
else:
ax.plot(x[0], y[0], 'r-', label=lbls[0])
ax.plot(x[1], y[1], 'g:', label=lbls[1])
ax.plot(x[2], y[2], 'b-.', label=lbls[2])
ax.legend()
fig.canvas.draw()
with open("reificationOnlyResults.pickle", 'rb') as f:
reifi_out = load(f)
with open("BBOOnlyResults.pickle", 'rb') as f:
BBO_out = load(f)
iteration = np.array(self.iterationData.loc[:,"Iteration"])
max_val = np.array(self.iterationData.loc[:,"Max Found"])
rve_calls = np.array(self.iterationData.loc[:,"Truth Model"])
rve_calls[0] = 0
isostrain_calls = np.array(self.iterationData.loc[:,"ROM 0"])
isostress_calls = np.array(self.iterationData.loc[:,"ROM 1"])
isowork_calls = np.array(self.iterationData.loc[:,"ROM 2"])
pltsin(ax[0], fig, [rve_calls, reifi_out[0], BBO_out[0]],
[max_val, reifi_out[1], BBO_out[1]],
["BAREFOOT", "Reification/Fusion", "Batch Bayesian Optimization"])
pltsin(ax[1], fig, [iteration, iteration, iteration],
[isostrain_calls, isostress_calls, isowork_calls],
["Isostrain", "Isostress", "Isowork"])
plt.show()
# The dataframes are saved in two forms, first a pickled version of the
# dataframe, and also a csv version for readability
with open('{}/results/{}/evaluatedPoints'.format(self.workingDir, self.calculationName), 'wb') as f:
dump(self.evaluatedPoints, f)
self.evaluatedPoints.to_csv('{}/results/{}/evaluatedPoints.csv'.format(self.workingDir, self.calculationName))
with open('{}/results/{}/iterationData'.format(self.workingDir, self.calculationName), 'wb') as f:
dump(self.iterationData, f)
self.iterationData.to_csv('{}/results/{}/iterationData.csv'.format(self.workingDir, self.calculationName))
# for the GP Hedge approach, the choice of model for each iteration is
# also saved to a separate file
hedge_out = {"ROM":[], "TM":[]}
if self.acquisitionFunc == "Hedge":
hedge_out["ROM"] = self.gpHedgeTrack
if self.tmSampleOpt == "Hedge":
hedge_out["TM"] = self.gpHedgeTrackTM
if self.acquisitionFunc == "Hedge" or self.tmSampleOpt == "Hedge":
with open('{}/results/{}/hedgeRecord'.format(self.workingDir, self.calculationName), 'wb') as f:
dump(hedge_out, f)
if self.multiObjective:
with open('{}/results/{}/paretoRecord{}'.format(self.workingDir, self.calculationName, self.currentIteration), 'wb') as f:
dump(self.pareto, f)
self.logger.info("Dataframes Pickled and Dumped to Results Directory")
def __save_calculation_state(self):
skipValues = ['logger', 'pool']
saveObj = {}
for item in self.__dict__:
if item not in skipValues:
saveObj[item] = self.__dict__[item]
# This function saves the entire barefoot object into a pickle file
with open('{}/data/{}_save_state'.format(self.workingDir, self.calculationName), 'wb') as f:
dump(saveObj, f)
self.logger.info("Calculation State Saved")
# self.logger.info("Calculation State Save Skipped")
def __load_from_save(self, workingDir, calculationName):
# This function restores the barefoot object parameters from a saved
# pickle file. In order for this to work, each variable of the object
# is restored separately.
try:
print('{}/data/{}_save_state'.format(workingDir, calculationName))
with open('{}/data/{}_save_state'.format(workingDir, calculationName), 'rb') as f:
saveState = load(f)
self.logger.debug("Save State File Found")
for item in saveState:
setattr(self, item, saveState[item])
except FileNotFoundError:
self.loadFailed = True
self.logger.warning("Could not find Save State File")
def __add_to_evaluatedPoints(self, modelIndex, eval_x, eval_y):
# Adds new data points to the evaluated datapoints dataframe
if self.multiObjective:
temp = np.zeros((eval_x.shape[0], self.nDim+4))
temp[:,0] = modelIndex
temp[:,1] = self.currentIteration
temp[:,2] = eval_y[:,0]
temp[:,3] = eval_y[:,1]
temp[:,4:] = eval_x[:,0:]
temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
else:
temp = np.zeros((eval_x.shape[0], self.nDim+3))
temp[:,0] = modelIndex
temp[:,1] = self.currentIteration
temp[:,2] = eval_y
temp[:,3:] = eval_x
temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
self.evaluatedPoints = pd.concat([self.evaluatedPoints,temp])
if self.multiObjective:
self.pareto = Pareto_finder(np.array(self.evaluatedPoints.iloc[:,2:4]),self.goal)
self.logger.debug("{} New Points Added to Evaluated Points Dataframe".format(eval_x.shape[0]))
def __add_to_iterationData(self, calcTime, iterData):
# Adds new data points to the Iteration Data Dataframe
if self.multiObjective:
temp = np.zeros((1,5+len(self.ROM)+self.nDim))
temp[0,0] = self.currentIteration
temp[0,1] = calcTime
temp[0,2] = self.maxTM[0]
temp[0,3] = self.maxTM[1]
temp[0,4] = iterData[-1]
temp[0,5:5+len(self.ROM)] = iterData[0:len(self.ROM)]
else:
temp = np.zeros((1,4+len(self.ROM)))
temp[0,0] = self.currentIteration
temp[0,1] = self.maxTM
temp[0,2] = calcTime
temp[0,3] = iterData[-1]
temp[0,4:] = iterData[0:len(self.ROM)]
temp = pd.DataFrame(temp, columns=self.iterationData.columns)
self.iterationData = pd.concat([self.iterationData,temp])
self.logger.debug("Iteration {} Data saved to Dataframe".format(self.currentIteration))
@__catch_error
def __get_initial_data__(self):
# Function for obtaining the initial data either by calculation or by
# extracting the data from a file.
params = []
count = []
param_index = 0
if self.multiObjective:
self.maxTM = [-np.inf,-np.inf]
else:
self.maxTM = -np.inf
if self.acquisitionFunc == "Hedge":
self.gpHedgeHist = [[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()]]
self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)
self.gpHedgeTrack = []
if self.tmSampleOpt == "Hedge":
self.gpHedgeHistTM = [[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()]]
self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)
self.gpHedgeTrackTM = []
if self.multiObjective:
if type(self.maximize) == list:
self.goal = np.array([-1,-1])
if self.maximize[0]:
self.goal[0] = 1
if self.maximize[1]:
self.goal[1] = 1
else:
if self.maximize:
self.goal = np.array([1,1])
else:
self.goal = np.array([-1,-1])
else:
if self.maximize:
self.goal = 1
else:
self.goal = -1
# Check if data needs to be calculated or extracted
if self.calcInitData:
self.logger.debug("Start Calculation of Initial Data")
# obtain LHS initial data for each reduced order model
if self.reification:
for ii in range(len(self.ROM)):
count.append(0)
initInput, check = apply_constraints(self.initDataPathorNum[ii],
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True)
if check:
self.logger.debug("ROM {} - Initial Data - All constraints applied successfully".format(ii))
else:
self.logger.critical("ROM {} - Initial Data - Some or All Constraints Could not Be applied! Continuing With {}/{}".format(ii, initInput.shape[0], self.initDataPathorNum[ii]))
for jj in range(initInput.shape[0]):
params.append({"Model Index":ii,
"Model":self.ROM[ii],
"Input Values":initInput[jj,:],
"ParamIndex":param_index})
param_index += 1
self.ROMInitInput.append(np.zeros_like(initInput))
if self.multiObjective:
self.ROMInitOutput.append(np.zeros((initInput.shape[0],2)))
else:
self.ROMInitOutput.append(np.zeros(initInput.shape[0]))
# Obtain LHS initial data for Truth Model
initInput, check = apply_constraints(self.initDataPathorNum[-1],
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True)
count.append(0)
if check:
self.logger.debug("TM - Initial Data - All constraints applied successfully")
else:
self.logger.critical("TM - Initial Data - Some or All Constraints Could not Be applied! Continuing With {}/{}".format(initInput.shape[0], self.initDataPathorNum[-1]))
for jj in range(initInput.shape[0]):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":initInput[jj,:],
"ParamIndex":param_index})
param_index += 1
self.TMInitInput = np.zeros_like(initInput)
if self.multiObjective:
self.TMInitOutput = np.zeros((initInput.shape[0],2))
else:
self.TMInitOutput = np.zeros(initInput.shape[0])
# Calculate all the initial data in parallel
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
pass_calculations = []
self.logger.debug("Parameters Defined. Starting Concurrent.Futures Calculation")
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
try:
test = results.shape
if par["Model Index"] != -1:
self.ROMInitInput[par["Model Index"]][count[par["Model Index"]],:] = par["Input Values"]
if self.multiObjective:
self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = np.tile(self.goal, (results.shape[0]))*results
else:
self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = self.goal*results
temp_x[par["ParamIndex"],:] = par["Input Values"]
if self.multiObjective:
temp_y[par["ParamIndex"],:] = self.goal*results
else:
temp_y[par["ParamIndex"]] = self.goal*results
temp_index[par["ParamIndex"]] = par["Model Index"]
else:
self.TMInitInput[count[par["Model Index"]],:] = par["Input Values"]
self.TMInitOutput[count[par["Model Index"]]] = self.goal*results
if self.multiObjective:
if results[0,0] > self.maxTM[0]:
self.maxTM[0] = results[0,0]
if results[0,1] > self.maxTM[1]:
self.maxTM[1] = results[0,1]
else:
if np.max(results) > self.maxTM:
self.maxTM = np.max(results)
temp_x[par["ParamIndex"],:] = par["Input Values"]
if self.multiObjective:
temp_y[par["ParamIndex"],:] = self.goal*results
else:
temp_y[par["ParamIndex"]] = self.goal*results
temp_index[par["ParamIndex"]] = par["Model Index"]
count[par["Model Index"]] += 1
pass_calculations.append(par["ParamIndex"])
except AttributeError:
pass
self.logger.debug("Concurrent.Futures Calculation Completed")
if self.multiObjective:
temp_y = temp_y[pass_calculations,:]
else:
temp_y = temp_y[pass_calculations]
temp_x = temp_x[pass_calculations,:]
temp_index = temp_index[pass_calculations]
else:
# extract the initial data from the file
self.logger.debug("Start Loading Initial Data from Files")
with open(self.initDataPathorNum, 'rb') as f:
data = load(f)
# extract data from dictionary in file and assign to correct variables
self.TMInitOutput = data["TMInitOutput"]
self.TMInitInput = data["TMInitInput"]
if self.reification:
self.ROMInitOutput = data["ROMInitOutput"]
self.ROMInitInput = data["ROMInitInput"]
print(self.TMInitInput)
print(self.TMInitOutput)
ROMSize = 0
for mmm in range(len(self.ROMInitInput)):
ROMSize += self.ROMInitOutput[mmm].shape[0]
temp_x = np.zeros((self.TMInitOutput.shape[0]+ROMSize,
self.nDim))
if self.multiObjective:
temp_y = np.zeros((self.TMInitOutput.shape[0]+ROMSize,2))
else:
temp_y = np.zeros(self.TMInitOutput.shape[0]+ROMSize)
temp_index = np.zeros(self.TMInitOutput.shape[0]+ROMSize)
ind = 0
if self.reification:
for ii in range(len(self.ROM)):
for jj in range(self.ROMInitOutput[ii].shape[0]):
temp_x[ind,:] = self.ROMInitInput[ii][jj,:]
if self.multiObjective:
temp_y[ind,:] = self.goal*self.ROMInitOutput[ii][jj,:]
else:
temp_y[ind] = self.goal*self.ROMInitOutput[ii][jj]
temp_index[ind] = ii
ind += 1
count.append(self.ROMInitInput[ii].shape[0])
for jj in range(self.TMInitOutput.shape[0]):
temp_x[ind,:] = self.TMInitInput[jj,:]
if self.multiObjective:
temp_y[ind,:] = self.goal*self.TMInitOutput[jj,:]
if temp_y[ind,0] > self.maxTM[0]:
self.maxTM[0] = temp_y[ind,0]
if temp_y[ind,1] > self.maxTM[1]:
self.maxTM[1] = temp_y[ind,1]
else:
temp_y[ind] = self.TMInitOutput[jj]
if self.TMInitOutput[jj] > self.maxTM:
self.maxTM = self.TMInitOutput[jj]
temp_index[ind] = -1
ind += 1
count.append(self.TMInitInput.shape[0])
self.logger.debug("Loading Data From File Completed")
# Add initial data to dataframes
iterData = np.array(count)
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
self.__add_to_iterationData(time()-self.timeCheck, iterData)
self.logger.debug("Initial Data Saved to Dataframes")
self.timeCheck = time()
@__catch_error
def initialize_parameters(self, modelParam, covFunc="M32", iterLimit=100,
sampleCount=50, hpCount=100, batchSize=5,
tmIter=1e6, totalBudget=1e16, tmBudget=1e16,
upperBound=1, lowBound=0.0001, fusedPoints=500,
fusedHP=[], fusedSamples=10000):
"""
This function sets the conditions for the barefoot framework calculations.
All parameters have default values except the model parameters.
Parameters
----------
modelParam : dictionary
This must be a dictionary with the hyperparameters for the reduced
order models as well as the costs for all the models. The specific
values in the dictionary must be:
'model_l': A list with the characteristic length scale for each
dimension in each reduced order model GP.
eg 2 reduced order - 3 dimension models
[[0.1,0.1,0.1],[0.2,0.2,0.2]]
'model_sf': A list with the signal variance for each reduced
order model GP.
'model_sn': A list with the noise variance for each reduced
order model GP.
'means': A list of the mean of each model. Set to 0 if the mean
is not known
'std': A list of the standard deviations of each model. Set to 1
if the standard deviation is not known.
'err_l': A list with the characteristic length scale for each
dimension in each discrepancy GP. Must match dimensions
of model_l
'err_sf': A list with the signal variance for each discrepancy GP.
'err_sn': A list with the noise variance for each discrepancy GP.
'costs': The model costs, including the Truth Model
eg. 2 ROM : [model 1 cost, model 2 cost, Truth model cost]
covFunc : String, optional
The covariance function to used for the Gaussian Process models.
Options are Squared Exponential ("SE") Matern 3/2 ("M32") and
Matern 5/2 ("M52"). The default is "M32".
iterLimit : Int, optional
How many iterations to run the framework calculation before
terminating. The default is 100.
sampleCount : Int, optional
The number of samples to use for the acquisition function calculations.
The default is 50.
hpCount : Int, optional
The number of hyperparameter sets to use. The default is 100.
batchSize : Int, optional
The batch size for the model evaluations. The default is 5.
tmIter : Int, optional
The number of iterations to complete before querying the Truth Model.
The default is 1e6.
totalBudget : Int/Float, optional
The total time budget to expend before terminating the calculation.
The default is 1e16.
tmBudget : Int/Float, optional
The budget to expend before querying the Truth Model. The default
is 1e16.
upperBound : Float, optional
The upper bound for the hyperparameters. The default is 1.
lowBound : Float, optional
The lower bound for the hyperparameters. The default is 0.0001.
fusedPoints : Int, optional
The number of points to sample from a LHS sampler at which to
evaluate the fused mean and variance for building the fused model.
The default is 500.
fusedHP : List, optional
Holds the hyperparameters for the fused model if the approach does not
use the Batch approach
fusedSamples : Int, optional
The number of samples to take from the design space for evaluating the fused
model for determining next-best points from the Truth model.
"""
self.logger.debug("Start Initializing Reification Object Parameters")
self.covFunc = covFunc
self.iterLimit = iterLimit
self.sampleCount = sampleCount
self.hpCount = hpCount
self.batchSize = batchSize
self.tmIterLim = tmIter
self.totalBudget = totalBudget
self.tmBudget = tmBudget
self.upperBound = upperBound
self.lowBound = lowBound
self.modelParam = modelParam
self.modelCosts = modelParam["costs"]
self.fusedHP = fusedHP
self.fusedSamples = fusedSamples
# A single call to numpy.linspace from lowBound to upperBound would space the
# candidate values linearly, leaving almost none of them at small magnitudes
# when the upper bound is large. The loop below builds the hyperparameter grid
# one order of magnitude at a time to avoid that.
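# For example (hypothetical bounds): with lowBound=1e-4, upperBound=1 and
# hpCount=100, the grid is built piecewise over [1e-4, 1e-3], [1e-3, 1e-2],
# [1e-2, 1e-1] and [1e-1, 1], giving hpCount candidate values per segment.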
temp_max = self.lowBound*10
all_HP = np.linspace(self.lowBound, temp_max, num=self.hpCount)
while temp_max < self.upperBound:
temp_min = deepcopy(temp_max)
temp_max = temp_max*10
if temp_max > self.upperBound:
temp_max = self.upperBound
all_HP = np.append(all_HP, np.linspace(temp_min, temp_max, num=self.hpCount))
# randomly combine the options for the hyperparameters into the hyperparameter sets
self.fusedModelHP = np.zeros((self.hpCount,self.nDim+1))
for i in range(self.hpCount):
for j in range(self.nDim+1):
self.fusedModelHP[i,j] = all_HP[np.random.randint(0,all_HP.shape[0])]
# create the evaluation points for determining the fused mean and
# variance
sampleSize = fusedPoints
if self.sampleScheme == "CompFunc":
sampleOption = "CompFunc"
else:
sampleOption = "LHS"
self.xFused, check = apply_constraints(sampleSize,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=sampleOption, opt_sample_size=False)
if check:
self.logger.debug("Fused Points - All constraints applied successfully {}/{}".format(self.xFused.shape[0], sampleSize))
else:
self.logger.critical("Fused Points - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(self.xFused.shape[0], sampleSize))
if not self.restore_calc:
self.logger.debug("Create Reification Object")
if self.multiObjective:
self.TMInitOutput = [np.array(self.TMInitOutput)[:,0],
np.array(self.TMInitOutput)[:,1]]
# build the reification object with the combined inputs and initial values
if self.reification:
self.ROMInitOutput = np.array(self.ROMInitOutput)
temp = [[],[]]
for pp in range(self.ROMInitOutput.shape[0]):
temp[0].append(self.ROMInitOutput[pp,:,0])
temp[1].append(self.ROMInitOutput[pp,:,1])
self.reificationObj = [model_reification(self.ROMInitInput, temp[0],
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput[0],
len(self.ROM), self.nDim, self.covFunc),
model_reification(self.ROMInitInput, temp[1],
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput[1],
len(self.ROM), self.nDim, self.covFunc)]
else:
if self.surrogate == "GP":
self.modelGP = [gp_model(self.TMInitInput, self.TMInitOutput[0],
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc),
gp_model(self.TMInitInput, self.TMInitOutput[1],
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc)]
else:
self.modelGP = [bmarsModel(self.TMInitInput, self.TMInitOutput[0]),
bmarsModel(self.TMInitInput, self.TMInitOutput[1])]
else:
# build the reification object with the combined inputs and initial values
if self.reification:
self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput,
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput,
len(self.ROM), self.nDim, self.covFunc)
else:
if self.surrogate == "GP":
self.modelGP = gp_model(self.TMInitInput, self.TMInitOutput,
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc)
elif self.surrogate == "BMARS":
self.modelGP = bmarsModel(self.TMInitInput, self.TMInitOutput)
self.allTMInput = []
self.allTMOutput = []
self.tmBudgetLeft = self.tmBudget
self.totalBudgetLeft = self.totalBudget
self.currentIteration += 1
self.tmIterCount = 0
self.logger.info("Reification Object Initialized. Ready for Calculations")
def __restart_subs(self):
# This function restarts the sub processes if they have been closed
# while doing the Truth Model evaluations
for kk in range(self.multinode):
try:
os.remove("{}/subprocess/close{}".format(self.workingDir, kk))
os.remove("{}/subprocess/sub{}.control".format(self.workingDir, kk))
os.remove("{}/subprocess/sub{}.start".format(self.workingDir, kk))
self.logger.debug("Close File {} removed".format(kk))
except FileNotFoundError:
self.logger.debug("Close File {} does not exist".format(kk))
calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)
subProcessWait = True
while subProcessWait:
if all_started:
subProcessWait = False
else:
total_started = 0
for fname in range(self.multinode):
if os.path.exists("{}/subprocess/sub{}.start".format(self.workingDir, fname)):
total_started += 1
if total_started == self.multinode:
all_started = True
self.logger.info("All Subprocess Jobs Started Successfully")
def __run_multinode_acq_func(self, x_test, new_mean, calcPerProcess):
# This function controls the parameter setup and transfer for the
# evaluation of the acquisition functions to determine the next best
# points for evaluating the Reduced Order Models when using subprocesses
self.logger.info("Set Up Parameters for Acquisition Function Evaluation and submit to Subprocesses")
parameters = []
parameterFileData = []
sub_fnames = []
count = 0
sub_count = 0
parameterIndex = 0
parameterFileIndex = 0
# Pickle the reification object to be loaded by each of the subprocesses
# this reduces the amount of memory that needs to be transferred
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
# set up the parameters to be used in the calculations
for jj in range(len(self.ROM)):
for kk in range(self.sampleCount):
if self.multiObjective:
means = [np.expand_dims(np.array([new_mean[jj][0][kk]]), axis=0),
np.expand_dims(np.array([new_mean[jj][1][kk]]), axis=0)]
model_temp = [means, self.goal, self.MORef, self.pareto[0]]
else:
model_temp = [np.expand_dims(x_test[kk], axis=0),
np.expand_dims(np.array([new_mean[jj][kk]]), axis=0),
jj]
if self.batch:
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, x_test, jj, kk, mm, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# store every 1000 sets of parameters in a file for use in the
# subprocesses
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
if count == calcPerProcess:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Send the trigger for the subprocess to pick up the data for
# the calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
# dump the index for the parameter files for the subprocess
# to load
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
else:
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedHP,
self.covFunc, x_test, jj, kk, 0, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# store every 1000 sets of parameters in a file for use in the
# subprocesses
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
if count == calcPerProcess:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Send the trigger for the subprocess to pick up the data for
# the calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
# dump the index for the parameter files for the subprocess
# to load
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
# dump the last of the parameter datasets
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# trigger the last subprocess and dump the index parameters
if parameters != []:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
self.logger.info("Start Waiting for Results to Complete")
# the calculations will take some time, so pause briefly before starting
# to check for results
calc_start = time()
sleep(10)
finished = 0
process_costs = np.zeros((len(sub_fnames)))
# check for finished subprocess calculations, and only continue once
# all subprocess calculations are completed
while finished < len(sub_fnames):
finished = 0
proc_count = 0
for sub_name in sub_fnames:
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_name), 'rb') as f:
control_param = load(f)
if control_param[0] == 1:
finished += 1
if process_costs[proc_count] == 0:
# When a subprocess has completed, record how long
# the subprocess ran for. This is the cost of the
# subprocess calculation
process_costs[proc_count] = time()-calc_start
if finished < len(sub_fnames):
sleep(10)
self.logger.info("Acquisition Function Evaluations Completed")
# Calculate the total subprocess cost.
process_cost = np.sum(process_costs)
# extract all the outputs from the subprocesses and collate them
# into a single array
kg_output = []
for sub_name in sub_fnames:
cont_loop = True
load_failed = True
timer = 0
while cont_loop:
try:
with open("{}/subprocess/{}.output".format(self.workingDir, sub_name), 'rb') as f:
try:
sub_output = load(f)
except EOFError:
raise FileNotFoundError
load_failed = False
cont_loop = False
except FileNotFoundError:
sleep(10)
timer += 30
if timer > 300:
cont_loop = False
if not load_failed:
self.logger.debug("sub_output {} found | length: {}".format(sub_name, len(sub_output)))
for jj in range(len(sub_output)):
kg_output.append(sub_output[jj])
os.remove("{}/subprocess/{}.output".format(self.workingDir, sub_name))
os.remove("{}/subprocess/{}.dump".format(self.workingDir, sub_name))
else:
self.logger.debug("sub_output {} NOT found".format(len(sub_name)))
self.logger.debug("Calculation Results retrieved from Subprocess Jobs")
return kg_output, process_cost
def __run_singlenode_acq_func(self, x_test, new_mean):
# As before, this function calculates the acquisition function values
# for determining the next best points to be queried from the reduced
# order models. This function runs the concurrent.futures calculations
# directly.
parameters = []
parameterFileData = []
count = 0
parameterIndex = 0
parameterFileIndex = 0
self.logger.debug("Set Up Parameters for Acquisition Function Evaluation")
# Save the current reification object to a file for loading
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
# Define the parameters for each calculation
for jj in range(len(self.ROM)):
for kk in range(self.sampleCount):
if self.multiObjective:
means = [np.expand_dims(np.array([new_mean[jj][0][kk]]), axis=0),
np.expand_dims(np.array([new_mean[jj][1][kk]]), axis=0)]
model_temp = [means, self.goal, self.MORef, self.pareto[0]]
else:
model_temp = [np.expand_dims(x_test[kk], axis=0),
np.expand_dims(np.array([new_mean[jj][kk]]), axis=0),
jj]
if self.batch:
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, x_test, jj, kk, mm, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save every 1000 parameter sets to a file to reduce the amount of memory used
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
else:
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedHP,
self.covFunc, x_test, jj, kk, 0, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save every 1000 parameter sets to a file to reduce the amount of memory used
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
# save the last of the parameters sets
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# set which acquisition function will be used
if self.acquisitionFunc == "EI":
acqFunc = calculate_EI
elif self.acquisitionFunc == "KG":
acqFunc = calculate_KG
elif self.acquisitionFunc == "TS":
acqFunc = calculate_TS
elif self.acquisitionFunc == "PI":
acqFunc = calculate_PI
elif self.acquisitionFunc == "UCB":
acqFunc = calculate_UCB
elif self.acquisitionFunc == "Hedge":
acqFunc = calculate_GPHedge
elif self.acquisitionFunc == "Greedy":
acqFunc = calculate_Greedy
elif self.acquisitionFunc == "EHVI":
acqFunc = calculate_EHVI
kg_output = []
# Start the concurrent calculations and return the output array
self.logger.info("Start Acquisition Function Evaluations for {} Parameter Sets".format(len(parameters)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(acqFunc,parameters)):
params, results = result_from_process
kg_output.append(results)
self.logger.info("Acquisition Function Evaluations Completed")
return kg_output, 0
def __run_multinode_fused(self, tm_test):
# As with the reduced order model calculations, this function evaluates
# the selected acquisition function to determine the next best points to
# evaluate from the Truth model
# Since this set of calculations uses only the hyperparameter count,
# a new calculation is needed to determine how many calculations to
# do on each subprocess
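# -(-a//b) is integer ceiling division, so all hpCount parameter sets are
# covered with at most calc_limit sets assigned to each subprocess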
calc_limit = (-(-self.hpCount//self.multinode))
self.logger.debug("Define Parameters for Max Value Evaluations")
parameters = []
parameterFileData = []
parameterIndex = 0
parameterFileIndex = 0
count = 0
sub_count = 0
sub_fnames = []
# Save the reification object to a file
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
if self.multiObjective:
extra_data = [self.pareto[0], self.goal, self.MORef]
else:
extra_data = []
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
count += 1
# Save every 500 parameter sets to a separate file to reduce memory
# usage
if len(parameterFileData) == 500:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
if count == calc_limit:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Trigger the subprocesses with a new calculation set
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "fused", self.acquisitionFunc]
dump(control_param, f)
# save the parameter indices to a file
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
# save the last of the parameter sets to a file
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
if parameters != []:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Trigger the final subprocess to start calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "fused", self.acquisitionFunc]
dump(control_param, f)
# dump the parameter indices to a file
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
self.logger.info("Parameters for Max Value Calculations Sent to Subprocess")
# wait for calculations to finish
sleep(10)
finished = 0
# check that all calculations have completed before continuing
while finished < len(sub_fnames):
finished = 0
for sub_name in sub_fnames:
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_name), 'rb') as f:
control_param = load(f)
if control_param[0] == 1:
finished += 1
if finished < len(sub_fnames):
sleep(10)
fused_output = []
# Extract the outputs from the individual subprocess output files and
# collate into a single array
for sub_name in sub_fnames:
cont_loop = True
load_failed = True
timer = 0
while cont_loop:
try:
with open("{}/subprocess/{}.output".format(self.workingDir, sub_name), 'rb') as f:
sub_output = load(f)
load_failed = False
cont_loop = False
except FileNotFoundError:
sleep(10)
timer += 30
if timer > 300:
cont_loop = False
if not load_failed:
self.logger.debug("sub_output {} found | length: {}".format(sub_name, len(sub_output)))
for jj in range(len(sub_output)):
fused_output.append(sub_output[jj])
os.remove("{}/subprocess/{}.output".format(self.workingDir, sub_name))
os.remove("{}/subprocess/{}.dump".format(self.workingDir, sub_name))
else:
self.logger.debug("sub_output {} NOT found".format(len(sub_name)))
# change the format of the output array to be a numpy array
fused_output = np.array(fused_output, dtype=object)
if fused_output.shape[0] == 0:
fused_output = np.array([[0,0]])
self.logger.info("Max Value Calculations Completed")
return fused_output
def __run_singlenode_fused(self, tm_test):
# This function achieves the same functionality as the multi-node fused
# function above, but does it all on the base node, rather than sending the
# data to subprocesses.
parameters = []
parameterFileData = []
# initialize the parameters for the fused model calculations and
# start the calculation
self.logger.debug("Define Parameters for Max Value Evaluations")
parameterIndex = 0
parameterFileIndex = 0
# save the reification object to a separate file
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
if self.multiObjective:
extra_data = [self.pareto[0], self.goal, self.MORef]
else:
extra_data = []
if self.batch:
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# Save each set of 500 parameters to a separate file
if len(parameterFileData) == 500:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
else:
parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedHP,
self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# Save each set of 500 parameters to a separate file
if len(parameterFileData) == 500:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
# Save the last of the parameter sets to a file
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# Set up a list of outputs for each of the results from the acquisition
# functions if using the GP Hedge approach
if self.tmSampleOpt == "Hedge":
fused_out = [[],[],[],[],[],[]]
else:
# Create just a single list for when using other Acquisition Functions
fused_output = []
self.logger.info("Start Max Value Calculations | {} Sets".format(len(parameters)))
count = 0
if self.multiObjective:
func = fused_EHVI
else:
func = fused_calculate
# Run the concurrent processes and save the outputs
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(func,parameters)):
params, results = result_from_process
if self.tmSampleOpt == "Hedge":
fused_out[0].append(results[0][0])
fused_out[1].append(results[0][1])
fused_out[2].append(results[0][2])
fused_out[3].append(results[0][3])
fused_out[4].append(results[0][4])
fused_out[5].append(results[0][5])
else:
fused_output.append(results[0])
count += 1
# When using the GP Hedge approach the list of outputs are returned
# as-is
if self.tmSampleOpt == "Hedge":
return fused_out
# when using other acquisition functions process the output to attempt
# the removal of all duplicates and then return the processed output
max_values = np.zeros((results[1],2))
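# max_values has one row per candidate test point index returned by the
# acquisition calculations; column 0 keeps the largest acquisition value seen
# for that point and column 1 keeps the index itself, so duplicate entries
# collapse to a single row per point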
for ii in range(len(fused_output)):
if max_values[fused_output[ii][1],0] != 0:
if max_values[fused_output[ii][1],0] < fused_output[ii][0]:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
else:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
fused_output = max_values[np.where(max_values[:,0]!=0)]
if fused_output.shape[0] == 0:
fused_output = np.array([[0,0]])
self.logger.info("Max Value Calculations Completed")
return fused_output
def __call_ROM(self, medoid_out, x_val):
# This function serves to evaluate the Reduced Order Models at the
# determined points. This is done in parallel to reduce the time taken
params = []
count = np.zeros((len(self.ROM)+1))
if self.multiObjective:
current = np.array(self.iterationData.iloc[:,4:5+len(self.ROM)])[-1,:]
else:
current = np.array(self.iterationData.iloc[:,3:])[-1,:]
count[0:len(self.ROM)] = current[1:]
count[-1] = current[0]
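# the stored iteration data lists the Truth Model call count first, followed
# by the ROM call counts, so reorder them here so that the Truth Model count
# sits in the last position of count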
param_index = 0
# Define the parameter sets needed for each calculation
self.logger.debug("Define Parameters for ROM Function Evaluations")
for iii in range(medoid_out.shape[0]):
params.append({"Model Index":medoid_out[iii,3],
"Model":self.ROM[medoid_out[iii,3]],
"Input Values":x_val[iii,:],
"ParamIndex":param_index})
param_index += 1
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
costs = np.zeros(len(params))
passed_calcs = []
# Run the concurrent calculations and extract the results
self.logger.info("Start ROM Function Evaluations | {} Calculations".format(len(params)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
costs[par["ParamIndex"]] += self.modelCosts[par["Model Index"]]
# if the model fails to evaluate, it should return False and the
# results are therefore not included in the output
try:
test = results.shape
results_evaluate = True
passed_calcs.append(par["ParamIndex"])
except AttributeError:
results_evaluate = False
# if self.multiObjective:
# try:
# if results == False:
# results_evaluate = False
# except ValueError:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# if results != False:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# results_evaluate = False
if results_evaluate:
if len(results.shape) == 1:
results = np.expand_dims(results, axis=0)
if self.multiObjective:
results = self.goal*results
temp_y[par["ParamIndex"],:] = results
else:
results = self.goal*results
temp_y[par["ParamIndex"]] = results
temp_x[par["ParamIndex"],:] = par["Input Values"]
temp_index[par["ParamIndex"]] = par["Model Index"]
if self.multiObjective:
self.reificationObj[0].update_GP(par["Input Values"], results[0,0], par["Model Index"])
self.reificationObj[1].update_GP(par["Input Values"], results[0,1], par["Model Index"])
else:
self.reificationObj.update_GP(par["Input Values"], results, par["Model Index"])
count[par["Model Index"]] += 1
# Remove any calculations that failed from the output and save the
# data
temp_x = temp_x[passed_calcs]
temp_y = temp_y[passed_calcs]
temp_index = temp_index[passed_calcs]
return temp_x, temp_y, temp_index, costs, count, len(passed_calcs)
def __call_Truth(self, params, count):
# This function evaluates the truth model at the points defined by the
# framework. The parameters for the calculation are defined elsewhere
# and this framework just runs the evaluations
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
costs = np.zeros(len(params))
passed_calcs = []
# Run the concurrent calculations and extract the results
self.logger.info("Start Truth Model Evaluations | {} Sets".format(len(params)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
costs[par["ParamIndex"]] += self.modelCosts[par["Model Index"]]
# if the truth function fails to evaluate, it should return false
# and therefore the results are not included in the output
try:
test = results.shape
results_evaluate = True
passed_calcs.append(par["ParamIndex"])
except AttributeError:
results_evaluate = False
# if self.multiObjective:
# try:
# if results == False:
# results_evaluate = False
# except ValueError:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# if results != False:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# results_evaluate = False
if results_evaluate:
if len(results.shape) == 1:
results = np.expand_dims(results, axis=0)
if self.multiObjective:
results = self.goal*results
temp_y[par["ParamIndex"],:] = results
else:
results = self.goal*results
temp_y[par["ParamIndex"]] = results
temp_x[par["ParamIndex"],:] = par["Input Values"]
temp_index[par["ParamIndex"]] = par["Model Index"]
count[par["Model Index"]] += 1
if self.multiObjective:
if self.reification:
self.reificationObj[0].update_truth(par["Input Values"], results[0,0])
self.reificationObj[1].update_truth(par["Input Values"], results[0,1])
else:
self.modelGP[0].update(par["Input Values"], results[0,0], 0.05, False)
self.modelGP[1].update(par["Input Values"], results[0,1], 0.05, False)
else:
if self.reification:
self.reificationObj.update_truth(par["Input Values"], results)
else:
self.modelGP.update(par["Input Values"], results, 0.05, False)
# Remove any calculations that failed from the output and save the
# data
if passed_calcs != []:
temp_x = temp_x[passed_calcs]
temp_y = temp_y[passed_calcs]
temp_index = temp_index[passed_calcs]
self.logger.info("Truth Model Evaluations Completed")
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
self.totalBudgetLeft -= self.batchSize*self.modelCosts[-1]
if self.multiObjective:
if np.max(temp_y[:,0]) > self.maxTM[0]:
self.maxTM[0] = np.max(temp_y[:,0])
if np.max(temp_y[:,1]) > self.maxTM[1]:
self.maxTM[1] = np.max(temp_y[:,1])
else:
if np.max(temp_y) > self.maxTM:
self.maxTM = np.max(temp_y)
else:
self.logger.critical("All Truth Model Evaluations Failed to Produce Results! Continue with no new results.")
# Return the updated model call counts
return count
def __singleAcqFuncApproach(self, x_test, new_mean, calcPerProcess):
# this function is set up to be used in conjunction with the GP Hedge
# approach to make the required acquisition function calls
if self.multinode > 0:
kg_output, process_cost = self.__run_multinode_acq_func(x_test,
new_mean,
calcPerProcess)
else:
kg_output, process_cost = self.__run_singlenode_acq_func(x_test,
new_mean)
return kg_output, process_cost
def __gpHedgeApproach(self, x_test, new_mean, calcPerProcess):
# This function is for using the GP Hedge Portfolio optimization approach
# Calculate the probabilities for each acquisition function
prob = self.gpHedgeProb/np.sum(self.gpHedgeProb)
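# gpHedgeProb accumulates the gain recorded for each acquisition function in
# __update_Hedge_Probabilities, so the normalized values act as selection
# probabilities for the portfolio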
# determine the index of the function with the highest probability
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrack.append(index_Max_prob)
# run the individual acquisition function evaluations
output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)
# the output will be a list of lists, choose the one corresponding to the
# maximum probability
kg_output = output[index_Max_prob]
clusters = []
# determine the batch of next best points for all acquisition function
# outputs for use in calculating the gain later
for ii in range(6):
cluster_output = np.array(output[ii])
# Cluster the acquisition function output
medoid_out = self.__kg_calc_clustering(cluster_output)
clusters.append(x_test[medoid_out[:,2].astype(int),:])
# save the clusters
with open("data/hedgeClusters", 'wb') as f:
dump(clusters, f)
# return the output from the selected function
return kg_output, process_cost
def __update_Hedge_Probabilities(self, models, x_val):
# at each iteration when using the GP Hedge approach it is necessary to
# calculate the gain associated with each acquisition function
# load the data, which is the clusters from each acquisition function output
with open("data/hedgeClusters", 'rb') as f:
clusters = load(f)
parameters = []
parameterFileData = []
# initialize the parameters for the fused model calculations and
# start the calculation
self.logger.debug("Define Parameters for Max Value Evaluations")
parameterIndex = 0
parameterFileIndex = 0
if self.reification:
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
else:
with open("data/reificationObj", 'wb') as f:
dump(self.modelGP, f)
# for each set of results, define the parameters and evaluate all the
# fused model GPs
for ii in range(6):
clusters[ii] = np.array(clusters[ii])
for mm in range(self.hpCount):
if models == "ROM":
parameterFileData.append((1, self.reification, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, clusters[ii], self.maxTM, 0.01, ii))
elif models == "TM":
parameterFileData.append((1, self.reification, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, clusters[ii], self.maxTM, 0.01, ii))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save each set of 500 parameters in a file
if len(parameterFileData) == 500:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
# save the last set of parameters in a file
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# run all the calculations concurrently and obtain the outputs
fused_output = [[],[],[],[],[],[]]
count = 0
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(evaluateFusedModel,parameters)):
params, results = result_from_process
fused_output[results[0]].append(results[1])
count += 1
# update the gain for each acquisition function for either the ROM or TM
if models == "ROM":
for ii in range(6):
mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)
self.gpHedgeHist[ii].append(np.max(mean_output))
if len(self.gpHedgeHist[ii]) > 2*self.tmIterLim:
self.gpHedgeHist[ii] = self.gpHedgeHist[ii][1:]
self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)
elif models == "TM":
for ii in range(6):
mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)
self.gpHedgeHistTM[ii].append(np.max(mean_output))
if len(self.gpHedgeHistTM[ii]) > 2*self.tmIterLim:
self.gpHedgeHistTM[ii] = self.gpHedgeHistTM[ii][1:]
self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)
def __singleAcqFused(self, tm_test):
# For the GP Hedge approach for the Truth Model, this function calls the
# individual calculations in either single- or multi-node configuration
if self.multinode > 0:
fused_output = self.__run_multinode_fused(tm_test)
else:
fused_output = self.__run_singlenode_fused(tm_test)
return fused_output
def __hedgeFused(self, tm_test):
# This function controls the use of the GP Hedge approach in the calculation
# of the next best points for the Truth model
# calculate the most recent probabilities and determine which acquisition
# function has the maximum probability
prob = self.gpHedgeProbTM/np.sum(self.gpHedgeProbTM)
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrackTM.append(prob)
# obtain the outputs from the acquisition functions
output = self.__singleAcqFused(tm_test)
fused_output = output[index_Max_prob]
max_values = np.zeros((tm_test.shape[0],2))
# process the selected output to remove duplicates
for ii in range(len(fused_output)):
if max_values[fused_output[ii][1],0] != 0:
if max_values[fused_output[ii][1],0] < fused_output[ii][0]:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
else:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
fused_output = max_values[np.where(max_values[:,0]!=0)]
if fused_output.shape[0] == 0:
fused_output = np.array([[0,0]])
self.logger.info("Max Value Calculations Completed")
clust = []
# cluster all the outputs, for the calculation of the gain at the
# end of the iteration
for ii in range(6):
cluster_output = np.array(output[ii], dtype=object)
# Cluster the acquisition function output
try:
if cluster_output.shape[0] > self.batchSize:
# medoids, clusters = k_medoids(cluster_output, self.batchSize)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(cluster_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(cluster_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(cluster_output, self.batchSize)
else:
medoids = []
for iii in range(cluster_output.shape[0]):
medoids.append(iii)
except:
# medoids, clusters = k_medoids(cluster_output, 1)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(cluster_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(cluster_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(cluster_output, 1)
clust.append(np.array(tm_test[medoids,:], dtype=float))
# save the clusters for use later
with open("data/hedgeClusters", 'wb') as f:
dump(clust, f)
return fused_output
def __close_subs_on_error(func):
"""
If an error occurs during the optimization, a multinode calculation must
still close all subprocesses to avoid excessive computing hour costs
"""
def close_subs(self):
no_error = False
try:
func(self)
no_error = True
except Exception as err:
if str(err) == 'Framework Shut Down to Facilitate External ROM Calculations!':
self.logger.info(err)
no_error = True
else:
self.logger.critical("Optimization Code Failed - See Error Below")
self.logger.exception(err)
if self.multinode > 0:
for fname in range(self.multinode):
with open("{}/subprocess/close{}".format(self.workingDir, fname), 'w') as f:
f.write("Close Subprocess {}".format(fname))
return no_error
return close_subs
@__close_subs_on_error
def run_BAREFOOT(self):
"""
This is the main optimization control function which handles all the calculations
of the BAREFOOT Framework
"""
if self.batch:
self.logger.info("Start Full BAREFOOT Framework Calculation")
else:
self.logger.info("Start Reification Only Framework Calculation")
# Check if the calculation requires multiple nodes and start them if necessary
if self.multinode > 0:
calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)
else:
calcPerProcess, all_started = (0, True)
self.ROM_Calc_Start = True
# Once all subprocesses have started, start the main calculation
if all_started:
start_process = True
while start_process:
if self.ROM_Calc_Start:
text_num = str(self.currentIteration)
self.logger.info("#########################################################")
self.logger.info("# Start Iteration : {} #".format("0"*(4-len(text_num))+text_num))
self.logger.info("#########################################################")
self.timeCheck = time()
# Check constraints and obtain latin-hypercube sampled test points
evalP = []
for pp in range(len(self.ROM)):
evalP.append(np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==pp,self.inputLabels]))
x_test, check = apply_constraints(self.sampleCount,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True,
evaluatedPoints=evalP)
# If constraints can't be satisfied, notify the user in the log
if check:
self.logger.debug("ROM - All constraints applied successfully {}/{}".format(x_test.shape[0], self.sampleCount))
else:
self.logger.critical("ROM - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(x_test.shape[0], self.sampleCount))
if self.multiObjective:
if self.reification:
new_mean = []
# obtain predictions from the low-order GPs
for iii in range(len(self.ROM)):
new1, var1 = self.reificationObj[0].predict_low_order(x_test, iii)
new2, var2 = self.reificationObj[1].predict_low_order(x_test, iii)
new_mean.append([new1, new2])
else:
new_mean = []
new1, var1 = self.modelGP[0].predict_var(x_test)
new2, var2 = self.modelGP[1].predict_var(x_test)
new_mean.append([new1, new2])
else:
if self.reification:
new_mean = []
# obtain predictions from the low-order GPs
for iii in range(len(self.ROM)):
new, var = self.reificationObj.predict_low_order(x_test, iii)
new_mean.append(new)
else:
new_mean, var = self.modelGP.predict_var(x_test)
# Calculate the Acquisition Function for each of the test points in each
# model for each set of hyperparameters
if self.acquisitionFunc == "Hedge":
kg_output, process_cost = self.__gpHedgeApproach(x_test, new_mean, calcPerProcess)
else:
kg_output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)
kg_output = np.array(kg_output, dtype=object)
# Cluster the acquisition function output
medoid_out = self.__kg_calc_clustering(kg_output)
model_cost = time()-self.timeCheck + process_cost
self.timeCheck = time()
if not self.externalROM:
# Call the reduced order models
temp_x, temp_y, temp_index, costs, count, check = self.__call_ROM(medoid_out, x_test[medoid_out[:,2].astype(int),:])
if check != 0:
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
if self.acquisitionFunc == "Hedge":
self.__update_Hedge_Probabilities("ROM", x_test)
else:
self.logger.critical("All ROM Evalutions Failed to produce a result! Continue with no new data")
else:
self.__external_ROM_data_save(medoid_out, x_test[medoid_out[:,2].astype(int),:])
# Set up external ROM
self.ROM_Calc_Start = False
self.__save_calculation_state()
sleep(10)
raise RuntimeWarning("Framework Shut Down to Facilitate External ROM Calculations!")
else:
temp_x, temp_y, temp_index, costs, count, check = self.__external_ROM_data_load(medoid_out, x_test[medoid_out[:,2].astype(int),:])
# Extract external ROM Data
self.ROM_Calc_Start = True
self.totalBudgetLeft -= np.sum(costs) + model_cost
self.tmBudgetLeft -= np.sum(costs) + model_cost
self.logger.info("ROM Function Evaluations Completed")
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
self.logger.info("Start Truth Model Evaluations")
evalP = [np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==-1,self.inputLabels])]
# create a test set that is dependent on the number of dimensions
tm_test, check = apply_constraints(self.fusedSamples,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme, opt_sample_size=True,
evaluatedPoints=evalP)
if check:
self.logger.debug("Truth Model Query - All constraints applied successfully")
else:
self.logger.critical("Truth Model Query - Some or All Constraints Could Not Be Applied! Continuing Without Constraints")
# Evaluate the acquisition function to determine the next best
# points to evaluate
if self.tmSampleOpt == "Hedge":
fused_output = self.__hedgeFused(tm_test)
else:
fused_output = self.__singleAcqFused(tm_test)
fused_output = np.array(fused_output)
if self.batch:
if fused_output.shape[0] > self.batchSize:
# medoids, clusters = k_medoids(fused_output[:,0].reshape((-1,1)), self.batchSize)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(fused_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(fused_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(fused_output, self.batchSize)
else:
if self.batchSize != 0:
medoids = []
for iii in range(fused_output.shape[0]):
medoids.append(iii)
#storeObject([np.where(fused_output[:,0] == np.max(fused_output[:,0]))[0][0], medoids], "ReifiFusedMedoid-{}".format(self.currentIteration))
else:
max_index = np.where(fused_output[:,0] == np.max(fused_output[:,0]))[0][0]
medoids = [max_index]
# define the parameters for the Truth Model Evaluations
params = []
param_index = 0
self.logger.debug("Define Parameters for Truth Model Evaluations")
for iii in range(len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(tm_test[int(fused_output[medoids[iii],1]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
if len(medoids) < self.batchSize:
for iii in range(self.batchSize - len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(tm_test[np.random.randint(0,tm_test.shape[0]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
self.tmIterCount = 0
self.tmBudgetLeft = self.tmBudget
# If an external Truth Model is used, submit the data for
# saving to output
if self.externalTM:
self.__external_TM_data_save(params, count)
break
else:
# If the subprocesses need to be closed, close them
if not self.keepSubRunning:
for fname in range(self.multinode):
with open("{}/subprocess/close{}".format(self.workingDir, fname), 'w') as f:
f.write("Close Subprocess {}".format(fname))
self.logger.warning("Close Subprocess {}".format(fname))
# otherwise, query the Truth Model directly
count = self.__call_Truth(params, count)
if self.tmSampleOpt == "Hedge":
self.__update_Hedge_Probabilities("TM", tm_test)
# for multinode calculations, check if subprocesses are being kept
# running and restart if not
if self.keepSubRunning:
pass
else:
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
pass
else:
if self.multinode != 0:
self.__restart_subs()
# save the required outputs
self.__add_to_iterationData(time()-self.timeCheck + model_cost, count)
self.timeCheck = time()
self.__save_output_dataframes()
# Update the reduced order models if they need to be retrained
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
if self.updateROMafterTM:
self.__update_reduced_order_models__()
self.__save_calculation_state()
self.logger.info("Iteration {} Completed Successfully".format(self.currentIteration))
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Iteration or Budget Limit Met or Exceeded #")
self.logger.info("# BAREFOOT Calculation Completed #")
self.logger.info("# #")
self.logger.info("#########################################################")
start_process = False
self.currentIteration += 1
self.tmIterCount += 1
@__close_subs_on_error
def run_BATCH(self):
self.logger.info("Start Batch Only Framework Calculation")
start_process = True
while start_process:
text_num = str(self.currentIteration)
self.logger.info("#########################################################")
self.logger.info("# Start Iteration : {} #".format("0"*(4-len(text_num))+text_num))
self.logger.info("#########################################################")
self.timeCheck = time()
# Check constraints and obtain latin-hypercube sampled test points
evalP = []
for pp in range(len(self.ROM)):
evalP.append(np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==pp,self.inputLabels]))
x_test, check = apply_constraints(self.sampleCount,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme, opt_sample_size=True,
evaluatedPoints=evalP)
# If constraints can't be satisfied, notify the user in the log
if check:
self.logger.debug("ROM - All constraints applied successfully {}/{}".format(x_test.shape[0], self.sampleCount))
else:
self.logger.critical("ROM - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(x_test.shape[0], self.sampleCount))
parameters = []
paramFileData = []
count = np.zeros((len(self.ROM)+1))
if self.multiObjective:
current = np.array(self.iterationData.iloc[:,4:5+len(self.ROM)])[-1,:]
extra_data = [self.pareto[0], self.goal, self.MORef]
else:
current = np.array(self.iterationData.iloc[:,3:])[-1,:]
extra_data = []
count[0:len(self.ROM)] = current[1:]
count[-1] = current[0]
parameterIndex = 0
parameterFileIndex = 0
with open("data/reificationObj", 'wb') as f:
dump(self.modelGP, f)
for jj in range(self.hpCount):
paramFileData.append((self.currentIteration+1, x_test, self.fusedModelHP[jj,:],
self.maxTM, self.tmSampleOpt, extra_data))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save every 1000 parameter sets to a file to reduce the amount of memory used
if len(paramFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(paramFileData, f)
paramFileData = []
parameterFileIndex += 1
parameterIndex = 0
# dump the last of the parameter datasets
if len(paramFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(paramFileData, f)
# Set up a list of outputs for each of the results from the acquisition
# functions if using the GP Hedge approach
if self.tmSampleOpt == "Hedge":
prob = self.gpHedgeProbTM/np.sum(self.gpHedgeProbTM)
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrackTM.append(prob)
kg_out = [[],[],[],[],[],[]]
else:
# Create just a single list for when using other Acquisition Functions
kg_output = []
# Start the concurrent calculations and return the output array
self.logger.info("Start Acquisition Function Evaluations for {} Parameter Sets".format(len(parameters)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(batchAcquisitionFunc,parameters)):
params, results = result_from_process
if self.tmSampleOpt == "Hedge":
kg_out[0].append(results[0])
kg_out[1].append(results[1])
kg_out[2].append(results[2])
kg_out[3].append(results[3])
kg_out[4].append(results[4])
kg_out[5].append(results[5])
else:
kg_output.append(results)
self.logger.info("Acquisition Function Evaluations Completed")
def get_medoids(kg_output):
# Cluster the acquisition function output
if kg_output.shape[0] > self.batchSize:
medoids = kmedoids_max(kg_output[:,0:3], self.batchSize)
else:
medoids = []
for iii in range(kg_output.shape[0]):
medoids.append(iii)
return medoids
if self.tmSampleOpt == "Hedge":
clust = []
for pp in range(6):
kg_out[pp] = np.array(kg_out[pp])
#kg_out[pp][np.isinf(kg_out[pp])] = -1e16
kg_out[pp] = np.unique(kg_out[pp], axis=0)
med = get_medoids(kg_out[pp])
if pp == index_Max_prob:
medoids = med
kg_output = kg_out[pp]
index = np.array(kg_out[pp][med,1],dtype=np.uint8)
clust.append(np.array(x_test[index,:], dtype=float))
with open("data/hedgeClusters", 'wb') as f:
dump(clust, f)
else:
kg_output = np.array(kg_output)
kg_output = np.unique(kg_output, axis=0)
medoids = get_medoids(kg_output)
model_cost = time()-self.timeCheck
self.timeCheck = time()
# define the parameters for the Truth Model Evaluations
params = []
param_index = 0
self.logger.debug("Define Parameters for Model Evaluations")
for iii in range(len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(x_test[int(kg_output[medoids[iii],1]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
count = self.__call_Truth(params, count)
if self.acquisitionFunc == "Hedge":
self.__update_Hedge_Probabilities("TM", x_test)
# save the required outputs
self.__add_to_iterationData(time()-self.timeCheck + model_cost, count)
self.timeCheck = time()
self.__save_output_dataframes()
# Update the reduced order models if they need to be retrained
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
if self.updateROMafterTM:
self.__update_reduced_order_models__()
self.__save_calculation_state()
self.logger.info("Iteration {} Completed Successfully".format(self.currentIteration))
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Iteration or Budget Limit Met or Exceeded #")
self.logger.info("# BAREFOOT Calculation Completed #")
self.logger.info("# #")
self.logger.info("#########################################################")
start_process = False
self.currentIteration += 1
self.tmIterCount += 1
def run_optimization(self):
if self.reification:
return self.run_BAREFOOT()
else:
return self.run_BATCH()
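# A minimal usage sketch (hypothetical instance name "framework"; the model
# parameters are assumed to have been initialized already):
#
#     no_error = framework.run_optimization()
#
# run_optimization dispatches to run_BAREFOOT when reification is enabled and
# to run_BATCH otherwise; the __close_subs_on_error decorator returns True
# when the calculation completed without an unhandled error.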
def __kg_calc_clustering(self, kg_output):
# This function clusters the output from the Reduced Order Model stage
# acquisition function evaluations. Some processing is required to
# obtain the correct format.
# convert to a numpy array for ease of indexing
# kg_output = np.array(kg_output, dtype=object)
point_selection = {}
self.logger.debug("Extract Points for Clustering from Acquisition Function Evaluations")
# process the output to obtain the correct format for the clustering
# (model index, acquisition function value, input index)
for iii in range(kg_output.shape[0]):
try:
if int(kg_output[iii,3]) in point_selection[kg_output[iii,2]]['models']:
if kg_output[iii,1] > point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])]:
point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
else:
point_selection[kg_output[iii,2]]['models'].append(int(kg_output[iii,3]))
point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
except KeyError:
point_selection[kg_output[iii,2]] = {'models':[int(kg_output[iii,3])],
'nu':[],
'kg_out':[]}
for mm in range(len(self.ROM)):
point_selection[kg_output[iii,2]]['nu'].append(1e-6)
point_selection[kg_output[iii,2]]['kg_out'].append(-1)
point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
med_input = [[],[],[],[]]
for index in point_selection.keys():
for jjj in range(len(point_selection[index]['models'])):
med_input[0].append(point_selection[index]['nu'][point_selection[index]['models'][jjj]])
med_input[1].append(index)
med_input[2].append(point_selection[index]['models'][jjj])
med_input[3].append(point_selection[index]['kg_out'][point_selection[index]['models'][jjj]])
med_input = np.array(med_input).transpose()
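# med_input columns: acquisition value, test point index, model index, and
# the row of kg_output that produced the value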
# When using small numbers of test points and hyperparameters there may
# be too many duplicates, so check the number of points and return all
# of them if there are fewer than the required number of points
self.logger.debug("Cluster Acquisition Function Evaluations | {}".format(med_input.shape))
if self.batch:
if med_input.shape[0] > self.batchSize:
# medoids, clusters = k_medoids(med_input[:,0:3], self.batchSize)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(med_input[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(med_input[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(med_input[:,0:3], self.batchSize)
else:
# medoids, clusters = k_medoids(med_input[:,0:3], 1)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(med_input[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(med_input[:,0].reshape((-1,1)), 1)
medoids = kmedoids_max(med_input[:,0:3], med_input.shape[0])
#storeObject([np.where(med_input[:,0] == np.max(med_input[:,0]))[0][0], medoids], "ReifiROM-{}".format(self.currentIteration))
else:
max_index = np.where(med_input[:,0] == np.max(med_input[:,0]))[0][0]
medoids = [max_index]
# next, need to get the true values for each of the medoids and update the
# models before starting next iteration.
self.logger.debug("Extract True Values for Medoids")
medoid_index = []
for i in range(len(medoids)):
medoid_index.append(int(med_input[medoids[i],3]))
medoid_out = kg_output[medoid_index,:]
self.logger.info("Clustering of Acquisition Function Evaluations Completed")
return medoid_out
def __start_subprocesses__(self, subprocess_count):
# The subprocesses require a separate directory in the main BAREFOOT
# directory, so these need to be created if they don't exist
try:
os.mkdir('{}/subprocess'.format(self.workingDir))
self.logger.debug("Subprocess Directory Created")
except FileExistsError:
self.logger.debug("Subprocess Directory Already Exists")
pass
try:
os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))
self.logger.debug("LSFOut Directory Created")
except FileExistsError:
self.logger.debug("LSFOut Directory Already Exists")
pass
# These strings are used to create the job files for the subprocesses used
# when running the calculations in multi-node configuration
with open("{}/data/processStrings".format(self.workingDir), 'rb') as f:
processStrings = load(f)
self.logger.info("Strings for Subprocess Shell Files Loaded")
# extract the two process strings and calculate how many calculations
# will be done per subprocess
subProcessStr = processStrings[0]
runProcessStr = processStrings[1]
if self.batch and self.reification:
calculation_count = self.sampleCount*self.hpCount*(len(self.ROM))
elif self.batch and not self.reification:
calculation_count = self.hpCount
else:
calculation_count = self.sampleCount*(len(self.ROM))
if calculation_count % subprocess_count == 0:
calcPerProcess = int(calculation_count/subprocess_count)
else:
calcPerProcess = int(calculation_count/subprocess_count) + 1
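# the division is rounded up so that every calculation is assigned even when
# the total does not divide evenly among the subprocesses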
self.logger.info("{} Subprocess Jobs | {} Calculations per Subprocess".format(subprocess_count, calcPerProcess))
# Start all subprocesses
for fname in range(subprocess_count):
with open("{}/subprocess/{}.sh".format(self.workingDir, fname), 'w') as f:
f.write(subProcessStr.format(fname))
with open("{}/subprocess/submit{}.sh".format(self.workingDir, fname), 'w') as f:
f.write(runProcessStr.format(fname))
os.chmod("{}/subprocess/submit{}.sh".format(self.workingDir, fname), 0o775)
subprocess.run(["{}/subprocess/submit{}.sh".format(self.workingDir, fname)], shell=True)
# wait for all subprocesses to start
all_pending = True
self.logger.info("Waiting for Subprocess Jobs to start")
count = 0
all_started = False
while all_pending:
sleep(10)
total_started = 0
for fname in range(subprocess_count):
if os.path.exists("{}/subprocess/sub{}.start".format(self.workingDir, fname)):
total_started += 1
count += 1
if total_started == subprocess_count:
all_pending = False
all_started = True
self.logger.info("All Subprocess Jobs Started Successfully")
# If the subprocesses have not all started within 2 hours, stop waiting
# and return False from this function to indicate that not all processes
# were started. This saves computational hours if there is a problem, but
# the behavior can be disabled if desired.
if count == 720:
all_pending = False
self.logger.critical("Subprocess Jobs Outstanding after 2 Hours | {}/{} Jobs Started".format(total_started, subprocess_count))
return calcPerProcess, all_started
def __update_reduced_order_models__(self):
# If the reduced order models are configured to be retrained after more
# truth model evaluations have been conducted, this function re-evaluates
# all the evaluated points and reconstructs the reification object with
# the new values.
self.train_func("results/{}/".format(self.calculationName))
self.logger.info("Recalculate all evaluated points for ROM to ensure correct model results are used")
self.ROMInitInput = []
self.ROMInitOutput = []
TMDataX = self.reificationObj.x_true
TMDataY = self.reificationObj.y_true
params = []
params_truth = []
count = []
param_index = 0
modelIter_record = []
for jj in range(len(self.ROM)+1):
count.append(0)
for jj in range(self.evaluatedPoints.shape[0]):
modelIter_record.append([self.evaluatedPoints.loc[jj,"Model Index"], self.evaluatedPoints.loc[jj,"Iteration"]])
if self.evaluatedPoints.loc[jj,"Model Index"] != -1:
params.append({"Model Index":self.evaluatedPoints.loc[jj,"Model Index"],
"Model":self.ROM[self.evaluatedPoints.loc[jj,"Model Index"]],
"Input Values":self.evaluatedPoints.loc[jj,self.inputLabels],
"ParamIndex":param_index})
else:
count[-1] += 1
params_truth.append({"Model Index":-1,
"Model":self.TM,
"Input Values":self.evaluatedPoints.loc[jj,self.inputLabels],
"ParamIndex":param_index,
"Evaluation": self.evaluatedPoints.loc[jj,"y"]})
param_index += 1
for ii in range(len(self.ROM)):
self.ROMInitInput.append(np.zeros_like(self.reificationObj.x_train[ii]))
self.ROMInitOutput.append(np.zeros_like(self.reificationObj.y_train[ii]))
temp_x = np.zeros((len(modelIter_record), self.nDim))
temp_y = np.zeros(len(modelIter_record))
temp_index = np.zeros(len(modelIter_record))
temp_iter = np.array(modelIter_record)
# Run the evaluations concurrently and store the outputs
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
if par["Model Index"] != -1:
self.ROMInitInput[par["Model Index"]][count[par["Model Index"]],:] = par["Input Values"]
self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = results
temp_x[par["ParamIndex"],:] = par["Input Values"]
temp_y[par["ParamIndex"]] = results
temp_index[par["ParamIndex"]] = par["Model Index"]
count[par["Model Index"]] += 1
for pp in range(len(params_truth)):
temp_x[params_truth[pp]["ParamIndex"]] = params_truth[pp]["Input Values"]
temp_y[params_truth[pp]["ParamIndex"]] = params_truth[pp]["Evaluation"]
temp_index[params_truth[pp]["ParamIndex"]] = -1
self.logger.info("Create New Reification Object")
# Recreate the reification object for further calculations
self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput,
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
TMDataX, TMDataY,
len(self.ROM), self.nDim, self.covFunc)
# save the new data
# Adds new data points to the evaluated datapoints dataframe
temp = np.zeros((temp_x.shape[0], self.nDim+3))
temp[:,0] = temp_index
temp[:,1] = temp_iter
temp[:,2] = temp_y
temp[:,3:] = temp_x
temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
self.evaluatedPoints = temp
self.__add_to_iterationData(time()-self.timeCheck, np.array(count))
self.timeCheck = time()
self.logger.info("New Evaluations Saved | Reification Object Updated")
def __external_TM_data_save(self, TMEvaluationPoints, count):
# When using an external Truth Model, it is necessary to save the next
# best points for use in the external calculations or experiments
outputData = np.zeros((len(TMEvaluationPoints), self.nDim+1))
for ii in range(len(TMEvaluationPoints)):
outputData[ii,0:self.nDim] = TMEvaluationPoints[ii]["Input Values"]
colNames = deepcopy(self.inputLabels)
if self.multiObjective:
colNames.append("y1")
colNames.append("y2")
else:
colNames.append("y")
        outputData = pd.DataFrame(outputData, columns=colNames)
# -*- coding: utf-8 -*-
# Analyze incidence of inhibitory or activating interactions across taxon
import settings as S
import pandas as pd
import os
import numpy as np
import pdb
import scipy.stats as st
import matplotlib.pyplot as plt
from pandas.util.testing import assert_series_equal
from collections import Counter
plt.ion()
plt.close('all')
def norm_entropy( series ):
    # entropy, scaled by the number of entries (the maximum possible entropy is ln(# of entries))
norm_entropy = st.entropy( series )/series.shape[0]
return norm_entropy
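# Example (illustrative, not part of the original analysis): for a taxon count
# series such as pd.Series([3, 1]), st.entropy gives ~0.562 nats, so
# norm_entropy returns ~0.281 after dividing by the two entries.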
def summarystring( subdf ):
# summarizes the entries in subdf
return ';'.join([item +':' + str(subdf.ix[item]) for item in subdf.index])
def literaturestring( subdf ):
# Summarizes literature references
litstring = ';'.join(subdf['Literature'])
litstring2 = ''.join(litstring.split(' '))
uqlit = np.unique( litstring2.split(';') )
return len(uqlit),';'.join(uqlit)
# Set some parameters
tax2use = 'kingdom'
minsize = 10
# Read in central carbon metabolism reactions
ccm = S.read_cache('CCM_Reactions')
ccm['EcoliGene'] = ccm.index
ccm.index = ccm['EC']
reg = S.read_cache('regulation')
reg = reg[reg['Source'] == 'BRENDA'] # don't bias with just ecocyc/excluding remainder of biocyc
ki = reg[reg['Mode'] == '-']
act = reg[reg['Mode'] == '+']
#ki = S.get_data_df('inhibiting')
#act = S.read_cache('activating')
tax = S.read_cache('TaxonomicData') # was TaxonomicData_temp
# Drop entries without organism
ki = ki[pd.notnull(ki['Organism'])]
act = act[pd.notnull(act['Organism'])]
# Convert LigandID to string
ki['LigandID'] = ki['LigandID'].astype(str)
act['LigandID'] = act['LigandID'].astype(str)
# Annotate with taxonomy of choice
ki = ki[ki['Organism'].isin( tax.index )]
act = act[act['Organism'].isin( tax.index )]
ki_tax = tax.ix[ ki['Organism'], tax2use ]
ki_tax.index = ki.index
ki['taxonomy'] = ki_tax
act_tax = tax.ix[ act['Organism'], tax2use ]
act_tax.index = act.index
act['taxonomy'] = act_tax
# Drop null values
ki = ki[pd.notnull(ki['LigandID'])]
ki = ki[pd.notnull(ki['taxonomy'])]
act = act[pd.notnull(act['LigandID'])]
act = act[pd.notnull(act['taxonomy'])]
# We don't want duplicate measurements of the same EC:LigandID in the same organism
ki.index = [':'.join( [ki.at[row,'EC_number'],ki.at[row,'LigandID'],ki.at[row,'Organism']] ) for row in ki.index]
act.index = [':'.join([act.at[row,'EC_number'], act.at[row,'LigandID'], act.at[row,'Organism'] ]) for row in act.index]
ki = ki[~ki.index.duplicated()]
act = act[~act.index.duplicated()]
# Remove instances of inhibition where "no inhibition" is mentioned
noinhib_ix = [item for item in ki.index if 'no inhibition' not in str(ki.at[item,'Commentary']).lower() ]
ki = ki.ix[noinhib_ix,:]
# Now do some analysis
ki_merge = ki.groupby(['EC_number','LigandID'])
act_merge = act.groupby(['EC_number', 'LigandID'])
res = pd.DataFrame( columns = ['Type','Key','EC_number','LigandID','Compound','TotalEntries','Entropy','Summary','URL','NullEntropy','NullSummary','Literature','NumReferences','Organisms'] )
for dtype in ['ki','act']:
merge2use = ki_merge if dtype == 'ki' else act_merge
d2use = ki if dtype =='ki' else act
for g in merge2use.groups.keys():
if len(merge2use.groups[ g ]) >= minsize:
# Get counts for each taxon
ixname = dtype + ':' + ':'.join(list(g))
res.at[ixname,'Key'] = g
res.at[ixname,'Type'] = dtype
res.at[ixname,'EC_number'] = g[0]
res.at[ixname,'LigandID'] = g[1]
#res.at[ixname,'Compound'] = ';'.join(d2use.ix[ merge2use.groups[ g ],:]['Compound'].unique().astype(str))
res.at[ixname,'TotalEntries'] = len(merge2use.groups[ g ] )
subdf = d2use.ix[ merge2use.groups[ g ],:]
res.at[ixname,'Compound'] = ';'.join(subdf['LigandName'].unique())
res.at[ixname,'Entropy'] = norm_entropy( subdf['taxonomy'].value_counts() )
res.at[ixname,'Summary'] = summarystring( subdf['taxonomy'].value_counts() )
urladd = '#INHIBITORS' if dtype == 'ki' else '#ACTIVATING%20COMPOUND'
res.at[ixname,'URL'] = 'http://www.brenda-enzymes.org/enzyme.php?ecno=' + g[0] + urladd
# Get the literature references
uqlit,uqlitstring = literaturestring( subdf )
res.at[ixname,'Literature'] = uqlitstring
res.at[ixname,'NumReferences'] = uqlit
res.at[ixname,'Organisms'] = ';'.join(subdf['Organism'].unique())
# Also calculate the entropy of all regulators of this EC, to see if it is specific to this metabolite or related to all metabolites
bigdf = d2use[d2use['EC_number'] == g[0]]['taxonomy'].value_counts()
res.ix[ixname,'NullEntropy'] = norm_entropy( bigdf )
res.ix[ixname,'NullSummary'] = summarystring( bigdf )
# Calculate change in normalized entropy. Careful, this is not a proper statistical measure, just a heuristic!
res['DeltaEntropy'] = res['Entropy'] - res['NullEntropy']
# Write
res.to_csv('../res/Regulation_by_taxon.csv')
# Reduce to just highly confident interactions
res_reduced = res[ res['NumReferences'] >= 10]
res_reduced = res_reduced[ res_reduced['LigandID'] != '2' ]
# Keep only data with at least 10 references, dropping ligand ID 2 (= "more")
res_reduced.to_csv('../res/Regulation_by_taxon_highconfidence.csv')
# Reduce data to only the EC's in central carbon metabolism
res_reduced_EC = res_reduced[ res_reduced['EC_number'].isin(ccm['EC']) ]
res_reduced_EC['EcoliGene'] = np.nan
for ii in res_reduced_EC.index:
    res_reduced_EC.at[ii,'EcoliGene'] = ccm.at[ res_reduced_EC.at[ii,'EC_number'],'EcoliGene' ]
# Write the high-confidence interactions restricted to central carbon metabolism ECs
res_reduced_EC.to_csv('../res/Regulation_by_taxon_CCM.csv')
# Make a table indicating the number of interactions for each species
ki_species = ki.groupby(['Organism'])
act_species = act.groupby(['Organism'])
uqspecies = set(ki_species.groups.keys()).union(act_species.groups.keys())
species_df = pd.DataFrame( columns = ['Inhibition','Activation'] )
for species in uqspecies:
if species in ki_species.groups.keys():
species_df.at[ species,'Inhibition'] = ki_species.groups[species].size
else:
species_df.at[ species,'Inhibition'] = 0
if species in act_species.groups.keys():
species_df.at[ species,'Activation'] = act_species.groups[species].size
else:
species_df.at[ species,'Activation'] = 0
species_df['Total'] = species_df['Activation'] + species_df['Inhibition']
species_df.to_csv('../res/Regulation_by_taxon_speciescounts.csv')
##########################################################################################
# A last little piece, compare to a prior version of the result to see if anything changed
oldres = pd.read_csv('../oldres/March2017/Regulation_by_taxon.csv', header=0, index_col=0)
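# Illustrative sketch only -- the original comparison code is not shown here,
# and the archived file is assumed to share the 'Entropy' column layout.
shared_ix = res.index.intersection(oldres.index)
entropy_new = pd.to_numeric(res.loc[shared_ix, 'Entropy'])
entropy_old = pd.to_numeric(oldres.loc[shared_ix, 'Entropy'])
changed = (entropy_new - entropy_old).abs() > 1e-6
print('{} shared interactions, {} with changed entropy'.format(len(shared_ix), int(changed.sum())))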
import wf_core_data.utils
import requests
import pandas as pd
from collections import OrderedDict
# import pickle
# import json
import datetime
import time
import logging
import os
logger = logging.getLogger(__name__)
DEFAULT_DELAY = 0.25
DEFAULT_MAX_REQUESTS = 50
DEFAULT_WRITE_CHUNK_SIZE = 10
SCHOOLS_BASE_ID = 'appJBT9a4f3b7hWQ2'
DATA_DICT_BASE_ID = 'appJBT9a4f3b7hWQ2'
# DATA_DICT_BASE_ID = 'appHMyIWgnHqVJymL'
class AirtableClient:
def __init__(
self,
api_key=None,
url_base='https://api.airtable.com/v0/'
):
self.api_key = api_key
self.url_base = url_base
if self.api_key is None:
self.api_key = os.getenv('AIRTABLE_API_KEY')
def fetch_tl_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching TL data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='TLs',
params=params
)
tl_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_id_at', record.get('id')),
('teacher_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_full_name_at', fields.get('Full Name')),
('teacher_first_name_at', fields.get('First Name')),
('teacher_middle_name_at', fields.get('Middle Name')),
('teacher_last_name_at', fields.get('Last Name')),
('teacher_title_at', fields.get('Title')),
('teacher_ethnicity_at', fields.get('Race & Ethnicity')),
('teacher_ethnicity_other_at', fields.get('Race & Ethnicity - Other')),
('teacher_income_background_at', fields.get('Income Background')),
('teacher_email_at', fields.get('Email')),
('teacher_email_2_at', fields.get('Email 2')),
('teacher_email_3_at', fields.get('Email 3')),
('teacher_phone_at', fields.get('Phone Number')),
('teacher_phone_2_at', fields.get('Phone Number 2')),
('teacher_employer_at', fields.get('Employer')),
('hub_at', fields.get('Hub')),
('pod_at', fields.get('Pod')),
('user_id_tc', fields.get('TC User ID'))
])
tl_data.append(datum)
if format == 'dataframe':
tl_data = convert_tl_data_to_df(tl_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return tl_data
def fetch_location_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching location data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Locations',
params=params
)
location_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('location_id_at', record.get('id')),
('location_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('location_address_at', fields.get('Address')),
('school_id_at', wf_core_data.utils.to_singleton(fields.get('School Name'))),
('school_location_start_at', wf_core_data.utils.to_date(fields.get('Start of time at location'))),
('school_location_end_at', wf_core_data.utils.to_date(fields.get('End of time at location')))
])
location_data.append(datum)
if format == 'dataframe':
location_data = convert_location_data_to_df(location_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return location_data
def fetch_teacher_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching teacher school association data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Teachers x Schools',
params=params
)
teacher_school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_school_id_at', record.get('id')),
('teacher_school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_id_at', fields.get('TL')),
('school_id_at', fields.get('School')),
('teacher_school_start_at', wf_core_data.utils.to_date(fields.get('Start Date'))),
('teacher_school_end_at', wf_core_data.utils.to_date(fields.get('End Date'))),
('teacher_school_active_at', wf_core_data.utils.to_boolean(fields.get('Currently Active')))
])
teacher_school_data.append(datum)
if format == 'dataframe':
teacher_school_data = convert_teacher_school_data_to_df(teacher_school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return teacher_school_data
def fetch_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching school data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Schools',
params=params
)
school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('school_id_at', record.get('id')),
('school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_id_at', fields.get('Hub')),
('pod_id_at', fields.get('Pod')),
('school_name_at', fields.get('Name')),
('school_short_name_at', fields.get('Short Name')),
('school_status_at', fields.get('School Status')),
('school_ssj_stage_at', fields.get('School Startup Stage')),
('school_governance_model_at', fields.get('Governance Model')),
('school_ages_served_at', fields.get('Ages served')),
('school_location_ids_at', fields.get('Locations')),
('school_id_tc', fields.get('TC school ID'))
])
school_data.append(datum)
if format == 'dataframe':
school_data = convert_school_data_to_df(school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return school_data
def fetch_hub_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching hub data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Hubs',
params=params
)
hub_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('hub_id_at', record.get('id')),
('hub_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_name_at', fields.get('Name'))
])
hub_data.append(datum)
if format == 'dataframe':
hub_data = convert_hub_data_to_df(hub_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return hub_data
def fetch_pod_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching pod data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Pods',
params=params
)
pod_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('pod_id_at', record.get('id')),
('pod_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('pod_name_at', fields.get('Name'))
])
pod_data.append(datum)
if format == 'dataframe':
pod_data = convert_pod_data_to_df(pod_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return pod_data
def fetch_ethnicity_lookup(self):
ethnicity_categories = self.fetch_ethnicity_categories()
ethnicity_mapping = self.fetch_ethnicity_mapping()
ethnicity_lookup = (
ethnicity_mapping
.join(
ethnicity_categories['ethnicity_category'],
how='left',
on='ethnicity_category_id_at'
)
.reindex(columns=[
'ethnicity_category'
])
.sort_index()
)
return ethnicity_lookup
def fetch_gender_lookup(self):
gender_categories = self.fetch_gender_categories()
gender_mapping = self.fetch_gender_mapping()
gender_lookup = (
gender_mapping
.join(
gender_categories['gender_category'],
how='left',
on='gender_category_id_at'
)
.reindex(columns=[
'gender_category'
])
.sort_index()
.sort_values('gender_category')
)
return gender_lookup
def fetch_household_income_lookup(self):
household_income_categories = self.fetch_household_income_categories()
household_income_mapping = self.fetch_household_income_mapping()
household_income_lookup = (
household_income_mapping
.join(
household_income_categories['household_income_category'],
how='left',
on='household_income_category_id_at'
)
.reindex(columns=[
'household_income_category'
])
.sort_index()
.sort_values('household_income_category')
)
return household_income_lookup
def fetch_nps_lookup(self):
nps_categories = self.fetch_nps_categories()
nps_mapping = self.fetch_nps_mapping()
nps_lookup = (
nps_mapping
.join(
nps_categories['nps_category'],
how='left',
on='nps_category_id_at'
)
.reindex(columns=[
'nps_category'
])
.sort_index()
)
return nps_lookup
def fetch_boolean_lookup(self):
boolean_categories = self.fetch_boolean_categories()
boolean_mapping = self.fetch_boolean_mapping()
boolean_lookup = (
boolean_mapping
.join(
boolean_categories['boolean_category'],
how='left',
on='boolean_category_id_at'
)
.reindex(columns=[
'boolean_category'
])
.sort_index()
.sort_values('boolean_category')
)
return boolean_lookup
def fetch_ethnicity_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity categories',
params=params
)
ethnicity_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_category_id_at', record.get('id')),
('ethnicity_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_category', fields.get('ethnicity_category')),
('ethnicity_display_name_english', fields.get('ethnicity_display_name_english')),
('ethnicity_display_name_spanish', fields.get('ethnicity_display_name_spanish')) ])
ethnicity_categories.append(datum)
if format == 'dataframe':
ethnicity_categories = convert_ethnicity_categories_to_df(ethnicity_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_categories
def fetch_gender_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender categories',
params=params
)
gender_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_category_id_at', record.get('id')),
('gender_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_category', fields.get('gender_category')),
('gender_display_name_english', fields.get('gender_display_name_english')),
('gender_display_name_spanish', fields.get('gender_display_name_spanish')) ])
gender_categories.append(datum)
if format == 'dataframe':
gender_categories = convert_gender_categories_to_df(gender_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_categories
def fetch_household_income_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income categories',
params=params
)
household_income_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_category_id_at', record.get('id')),
('household_income_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_category', fields.get('household_income_category')),
('household_income_display_name_english', fields.get('household_income_display_name_english')),
('household_income_display_name_spanish', fields.get('household_income_display_name_spanish')) ])
household_income_categories.append(datum)
if format == 'dataframe':
household_income_categories = convert_household_income_categories_to_df(household_income_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_categories
def fetch_nps_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS categories',
params=params
)
nps_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_category_id_at', record.get('id')),
('nps_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_category', fields.get('nps_category')),
('nps_display_name_english', fields.get('nps_display_name_english')),
('nps_display_name_spanish', fields.get('nps_display_name_spanish')) ])
nps_categories.append(datum)
if format == 'dataframe':
nps_categories = convert_nps_categories_to_df(nps_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_categories
def fetch_boolean_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean categories',
params=params
)
boolean_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_category_id_at', record.get('id')),
('boolean_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_category', wf_core_data.utils.to_boolean(fields.get('boolean_category'))),
('boolean_display_name_english', fields.get('boolean_display_name_english')),
('boolean_display_name_spanish', fields.get('boolean_display_name_spanish')) ])
boolean_categories.append(datum)
if format == 'dataframe':
boolean_categories = convert_boolean_categories_to_df(boolean_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_categories
def fetch_ethnicity_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity mapping',
params=params
)
ethnicity_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_mapping_id_at', record.get('id')),
('ethnicity_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_response', fields.get('ethnicity_response')),
('ethnicity_category_id_at', fields.get('ethnicity_category'))
])
ethnicity_mapping.append(datum)
if format == 'dataframe':
ethnicity_mapping = convert_ethnicity_mapping_to_df(ethnicity_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_mapping
def fetch_gender_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender mapping',
params=params
)
gender_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_mapping_id_at', record.get('id')),
('gender_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_response', fields.get('gender_response')),
('gender_category_id_at', fields.get('gender_category'))
])
gender_mapping.append(datum)
if format == 'dataframe':
gender_mapping = convert_gender_mapping_to_df(gender_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_mapping
def fetch_household_income_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income mapping',
params=params
)
household_income_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_mapping_id_at', record.get('id')),
('household_income_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_response', fields.get('household_income_response')),
('household_income_category_id_at', fields.get('household_income_category'))
])
household_income_mapping.append(datum)
if format == 'dataframe':
household_income_mapping = convert_household_income_mapping_to_df(household_income_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_mapping
def fetch_nps_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS mapping',
params=params
)
nps_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_mapping_id_at', record.get('id')),
('nps_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_response', fields.get('nps_response')),
('nps_category_id_at', fields.get('nps_category'))
])
nps_mapping.append(datum)
if format == 'dataframe':
nps_mapping = convert_nps_mapping_to_df(nps_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_mapping
def fetch_boolean_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean mapping',
params=params
)
boolean_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_mapping_id_at', record.get('id')),
('boolean_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_response', fields.get('boolean_response')),
('boolean_category_id_at', fields.get('boolean_category'))
])
boolean_mapping.append(datum)
if format == 'dataframe':
boolean_mapping = convert_boolean_mapping_to_df(boolean_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_mapping
def write_dataframe(
self,
df,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS,
write_chunk_size=DEFAULT_WRITE_CHUNK_SIZE
):
num_records = len(df)
num_chunks = (num_records // write_chunk_size) + 1
logger.info('Writing {} records in {} chunks'.format(
num_records,
num_chunks
))
for chunk_index in range(num_chunks):
start_row_index = chunk_index*write_chunk_size
end_row_index = min(
(chunk_index + 1)*write_chunk_size,
num_records
)
chunk_df = df.iloc[start_row_index:end_row_index]
chunk_list = chunk_df.to_dict(orient='records')
chunk_dict = {'records': [{'fields': row_dict} for row_dict in chunk_list]}
logger.info('Writing chunk {}: rows {} to {}'.format(
chunk_index,
start_row_index,
end_row_index
))
self.post(
base_id=base_id,
endpoint=endpoint,
data=chunk_dict
)
time.sleep(delay)
def bulk_get(
self,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
if params is None:
params = dict()
num_requests = 0
records = list()
while True:
data = self.get(
base_id=base_id,
endpoint=endpoint,
params=params
)
if 'records' in data.keys():
                logger.info('Returned {} records'.format(len(data.get('records'))))
records.extend(data.get('records'))
num_requests += 1
if num_requests >= max_requests:
logger.warning('Reached maximum number of requests ({}). Terminating.'.format(
max_requests
))
break
offset = data.get('offset')
if offset is None:
break
params['offset'] = offset
time.sleep(delay)
return records
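    # Note on ``bulk_get`` above: Airtable returns records a page at a time
    # together with an ``offset`` token; the loop follows that token until it
    # is absent or ``max_requests`` is reached, sleeping ``delay`` seconds
    # between calls.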
def post(
self,
base_id,
endpoint,
data
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.post(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
headers=headers,
json=data
)
if r.status_code != 200:
error_message = 'Airtable POST request returned status code {}'.format(r.status_code)
r.raise_for_status()
return r.json()
def get(
self,
base_id,
endpoint,
params=None
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.get(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
params=params,
headers=headers
)
if r.status_code != 200:
error_message = 'Airtable GET request returned status code {}'.format(r.status_code)
r.raise_for_status()
return r.json()
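# Illustrative usage sketch (not part of the original module); it assumes the
# AIRTABLE_API_KEY environment variable is set, as the constructor expects.
def _example_usage():
    client = AirtableClient()
    schools = client.fetch_school_data()
    teachers = client.fetch_tl_data()
    logger.info('Fetched %s school records and %s teacher records', len(schools), len(teachers))
    return schools, teachers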
def convert_tl_data_to_df(tl_data):
if len(tl_data) == 0:
return pd.DataFrame()
tl_data_df = pd.DataFrame(
tl_data,
dtype='object'
)
tl_data_df['pull_datetime'] = pd.to_datetime(tl_data_df['pull_datetime'])
tl_data_df['teacher_created_datetime_at'] = pd.to_datetime(tl_data_df['teacher_created_datetime_at'])
# school_data_df['user_id_tc'] = pd.to_numeric(tl_data_df['user_id_tc']).astype('Int64')
tl_data_df = tl_data_df.astype({
'teacher_full_name_at': 'string',
'teacher_middle_name_at': 'string',
'teacher_last_name_at': 'string',
'teacher_title_at': 'string',
'teacher_ethnicity_at': 'string',
'teacher_ethnicity_other_at': 'string',
'teacher_income_background_at': 'string',
'teacher_email_at': 'string',
'teacher_email_2_at': 'string',
'teacher_email_3_at': 'string',
'teacher_phone_at': 'string',
'teacher_phone_2_at': 'string',
'teacher_employer_at': 'string',
'hub_at': 'string',
'pod_at': 'string',
'user_id_tc': 'string'
})
tl_data_df.set_index('teacher_id_at', inplace=True)
return tl_data_df
def convert_location_data_to_df(location_data):
if len(location_data) == 0:
return pd.DataFrame()
location_data_df = pd.DataFrame(
location_data,
dtype='object'
)
location_data_df['pull_datetime'] = pd.to_datetime(location_data_df['pull_datetime'])
location_data_df['location_created_datetime_at'] = pd.to_datetime(location_data_df['location_created_datetime_at'])
location_data_df = location_data_df.astype({
'location_id_at': 'string',
'location_address_at': 'string',
'school_id_at': 'string'
})
location_data_df.set_index('location_id_at', inplace=True)
return location_data_df
def convert_teacher_school_data_to_df(teacher_school_data):
if len(teacher_school_data) == 0:
return pd.DataFrame()
teacher_school_data_df = pd.DataFrame(
teacher_school_data,
dtype='object'
)
    teacher_school_data_df['pull_datetime'] = pd.to_datetime(teacher_school_data_df['pull_datetime'])
import re
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork import DataColumn, DataTable
from woodwork.datatable import _check_unique_column_names
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
LogicalType,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
Timedelta,
ZIPCode
)
from woodwork.tests.testing_utils import (
check_column_order,
mi_between_cols,
to_pandas,
validate_subset_dt
)
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
dask_delayed = import_or_none('dask.delayed')
ks = import_or_none('databricks.koalas')
def test_datatable_df_property(sample_df):
dt = DataTable(sample_df)
assert dt.df is sample_df
pd.testing.assert_frame_equal(to_pandas(dt.df), to_pandas(sample_df))
def test_datatable_with_numeric_datetime_time_index(time_index_df):
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': Datetime})
error_msg = 'Time index column must contain datetime or numeric values'
with pytest.raises(TypeError, match=error_msg):
DataTable(time_index_df, name='datatable', time_index='strs', logical_types={'strs': Datetime})
assert dt.time_index == 'ints'
assert dt.to_dataframe()['ints'].dtype == 'datetime64[ns]'
def test_datatable_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
dt = DataTable(time_index_df, time_index='ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Integer
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Specify logical type for time index on init
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': 'Double'})
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Change time index to normal datetime time index
dt = dt.set_time_index('times')
date_col = dt['ints']
assert dt.time_index == 'times'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'numeric'}
# Set numeric time index after init
dt = DataTable(time_index_df, logical_types={'ints': 'Double'})
dt = dt.set_time_index('ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
def test_datatable_adds_standard_semantic_tags(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={
'id': Categorical,
'age': Integer,
})
assert dt.semantic_tags['id'] == {'category'}
assert dt.semantic_tags['age'] == {'numeric'}
def test_check_unique_column_names(sample_df):
if ks and isinstance(sample_df, ks.DataFrame):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if dd and isinstance(sample_df, dd.DataFrame):
duplicate_cols_df = dd.concat([duplicate_cols_df, duplicate_cols_df['age']], axis=1)
else:
duplicate_cols_df.insert(0, 'age', [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(IndexError, match='Dataframe cannot contain duplicate columns names'):
_check_unique_column_names(duplicate_cols_df)
def test_datatable_types(sample_df):
new_dates = ["2019~01~01", "2019~01~02", "2019~01~03", "2019~01~04"]
if dd and isinstance(sample_df, dd.DataFrame):
sample_df['formatted_date'] = pd.Series(new_dates)
else:
sample_df['formatted_date'] = new_dates
ymd_format = Datetime(datetime_format='%Y~%m~%d')
dt = DataTable(sample_df, logical_types={'formatted_date': ymd_format})
returned_types = dt.types
assert isinstance(returned_types, pd.DataFrame)
assert 'Physical Type' in returned_types.columns
assert 'Logical Type' in returned_types.columns
assert 'Semantic Tag(s)' in returned_types.columns
assert returned_types.shape[1] == 3
assert len(returned_types.index) == len(sample_df.columns)
assert all([dc.logical_type in ww.type_system.registered_types or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
'formatted_date': ymd_format
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(returned_types['Logical Type'])
for tag in returned_types['Semantic Tag(s)']:
assert isinstance(tag, str)
def test_datatable_typing_info_with_col_names(sample_df):
dt = DataTable(sample_df)
typing_info_df = dt._get_typing_info(include_names_col=True)
assert isinstance(typing_info_df, pd.DataFrame)
assert 'Data Column' in typing_info_df.columns
assert 'Physical Type' in typing_info_df.columns
assert 'Logical Type' in typing_info_df.columns
assert 'Semantic Tag(s)' in typing_info_df.columns
assert typing_info_df.shape[1] == 4
assert typing_info_df.iloc[:, 0].name == 'Data Column'
assert len(typing_info_df.index) == len(sample_df.columns)
assert all([dc.logical_type in LogicalType.__subclasses__() or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(typing_info_df['Logical Type'])
for tag in typing_info_df['Semantic Tag(s)']:
assert isinstance(tag, str)
correct_column_names = pd.Series(list(sample_df.columns),
index=list(sample_df.columns))
assert typing_info_df['Data Column'].equals(correct_column_names)
def test_datatable_head(sample_df):
dt = DataTable(sample_df, index='id', logical_types={'email': 'EmailAddress'}, semantic_tags={'signup_date': 'birthdat'})
head = dt.head()
assert isinstance(head, pd.DataFrame)
assert isinstance(head.columns, pd.MultiIndex)
if dd and isinstance(sample_df, dd.DataFrame):
assert len(head) == 2
else:
assert len(head) == 4
for i in range(len(head.columns)):
name, dtype, logical_type, tags = head.columns[i]
dc = dt[name]
# confirm the order is the same
assert dt._dataframe.columns[i] == name
# confirm the rest of the attributes match up
assert dc.dtype == dtype
assert dc.logical_type == logical_type
assert str(list(dc.semantic_tags)) == tags
shorter_head = dt.head(1)
assert len(shorter_head) == 1
assert head.columns.equals(shorter_head.columns)
def test_datatable_repr(small_df):
dt = DataTable(small_df)
dt_repr = repr(dt)
expected_repr = ' Physical Type Logical Type Semantic Tag(s)\nData Column \nsample_datetime_series datetime64[ns] Datetime []'
assert dt_repr == expected_repr
dt_html_repr = dt._repr_html_()
expected_repr = '<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>Physical Type</th>\n <th>Logical Type</th>\n <th>Semantic Tag(s)</th>\n </tr>\n <tr>\n <th>Data Column</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>sample_datetime_series</th>\n <td>datetime64[ns]</td>\n <td>Datetime</td>\n <td>[]</td>\n </tr>\n </tbody>\n</table>'
assert dt_html_repr == expected_repr
def test_datatable_repr_empty(empty_df):
dt = DataTable(empty_df)
assert repr(dt) == 'Empty DataTable'
assert dt._repr_html_() == 'Empty DataTable'
assert dt.head() == 'Empty DataTable'
def test_set_types_combined(sample_df):
dt = DataTable(sample_df, index='id', time_index='signup_date')
assert dt['signup_date'].semantic_tags == set(['time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Integer
assert dt['is_registered'].semantic_tags == set()
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == NaturalLanguage
assert dt['phone_number'].logical_type == NaturalLanguage
semantic_tags = {
'signup_date': ['test1'],
'age': [],
'is_registered': 'test2'
}
logical_types = {
'email': 'EmailAddress',
'phone_number': PhoneNumber,
'age': 'Double'
}
dt = dt.set_types(logical_types=logical_types, semantic_tags=semantic_tags)
assert dt['signup_date'].semantic_tags == set(['test1', 'time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Double
assert dt['is_registered'].semantic_tags == set(['test2'])
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == EmailAddress
assert dt['phone_number'].logical_type == PhoneNumber
def test_new_dt_from_columns(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
empty_dt = dt._new_dt_from_cols([])
assert len(empty_dt.columns) == 0
just_index = dt._new_dt_from_cols(['id'])
assert just_index.index == dt.index
assert just_index.time_index is None
validate_subset_dt(just_index, dt)
just_time_index = dt._new_dt_from_cols(['signup_date'])
assert just_time_index.time_index == dt.time_index
assert just_time_index.index is None
validate_subset_dt(just_time_index, dt)
transfer_schema = dt._new_dt_from_cols(['phone_number'])
assert transfer_schema.index is None
assert transfer_schema.time_index is None
validate_subset_dt(transfer_schema, dt)
def test_pop(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={'age': Integer},
semantic_tags={'age': 'custom_tag'},
use_standard_tags=True)
datacol = dt.pop('age')
assert isinstance(datacol, DataColumn)
assert 'custom_tag' in datacol.semantic_tags
assert all(to_pandas(datacol.to_series()).values == [33, 25, 33, 57])
assert datacol.logical_type == Integer
assert 'age' not in dt.to_dataframe().columns
assert 'age' not in dt.columns
assert 'age' not in dt.logical_types.keys()
assert 'age' not in dt.semantic_tags.keys()
def test_shape(categorical_df):
dt = ww.DataTable(categorical_df)
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 5)
assert dt_shape == df_shape
dt.pop('ints')
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 4)
assert dt_shape == df_shape
def test_select_invalid_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
err_msg = "Invalid selector used in include: 1 must be either a string or LogicalType"
with pytest.raises(TypeError, match=err_msg):
dt.select(['boolean', 'index', Double, 1])
dt_empty = dt.select([])
assert len(dt_empty.columns) == 0
def test_select_single_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d')
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth'
})
dt_ltype_string = dt.select('full_name')
assert len(dt_ltype_string.columns) == 1
assert 'full_name' in dt_ltype_string.columns
dt_ltype_obj = dt.select(Integer)
assert len(dt_ltype_obj.columns) == 2
assert 'age' in dt_ltype_obj.columns
assert 'id' in dt_ltype_obj.columns
dt_tag_string = dt.select('index')
assert len(dt_tag_string.columns) == 1
assert 'id' in dt_tag_string.columns
dt_tag_instantiated = dt.select('Datetime')
assert len(dt_tag_instantiated.columns) == 1
assert 'signup_date' in dt_tag_instantiated.columns
def test_select_list_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d'),
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth',
'email': 'tag2',
'is_registered': 'category'
})
dt_just_strings = dt.select(['FullName', 'index', 'tag2', 'boolean'])
assert len(dt_just_strings.columns) == 4
assert 'id' in dt_just_strings.columns
assert 'full_name' in dt_just_strings.columns
assert 'email' in dt_just_strings.columns
assert 'is_registered' in dt_just_strings.columns
dt_mixed_selectors = dt.select([FullName, 'index', 'time_index', Integer])
assert len(dt_mixed_selectors.columns) == 4
assert 'id' in dt_mixed_selectors.columns
assert 'full_name' in dt_mixed_selectors.columns
assert 'signup_date' in dt_mixed_selectors.columns
assert 'age' in dt_mixed_selectors.columns
dt_common_tags = dt.select(['category', 'numeric', Boolean, Datetime])
assert len(dt_common_tags.columns) == 3
assert 'is_registered' in dt_common_tags.columns
assert 'age' in dt_common_tags.columns
assert 'signup_date' in dt_common_tags.columns
def test_select_instantiated():
ymd_format = Datetime(datetime_format='%Y~%m~%d')
df = pd.DataFrame({
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd': ["2019~01~01", "2019~01~02", "2019~01~03"],
})
dt = DataTable(df,
logical_types={'ymd': ymd_format,
'dates': Datetime})
dt = dt.select('Datetime')
assert len(dt.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
dt.select(ymd_format)
def test_select_maintain_order(sample_df):
dt = DataTable(sample_df, logical_types={col_name: 'NaturalLanguage' for col_name in sample_df.columns})
new_dt = dt.select('NaturalLanguage')
check_column_order(dt, new_dt)
def test_filter_cols(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
filtered = dt._filter_cols(include='email', col_names=True)
assert filtered == ['email']
filtered_log_type_string = dt._filter_cols(include='NaturalLanguage')
filtered_log_type = dt._filter_cols(include=NaturalLanguage)
assert filtered_log_type == filtered_log_type_string
filtered_semantic_tag = dt._filter_cols(include='numeric')
assert filtered_semantic_tag == ['age']
filtered_multiple = dt._filter_cols(include=['numeric'])
expected = ['phone_number', 'age']
for col in filtered_multiple:
assert col in expected
filtered_multiple_overlap = dt._filter_cols(include=['NaturalLanguage', 'email'], col_names=True)
expected = ['full_name', 'phone_number', 'email']
for col in filtered_multiple_overlap:
assert col in expected
def test_datetime_inference_with_format_param():
df = pd.DataFrame({
'index': [0, 1, 2],
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd_special': ["2019~01~01", "2019~01~02", "2019~01~03"],
'mdy_special': pd.Series(['3~11~2000', '3~12~2000', '3~13~2000'], dtype='string'),
})
dt = DataTable(df,
name='dt_name',
logical_types={'ymd_special': Datetime(datetime_format='%Y~%m~%d'),
'mdy_special': Datetime(datetime_format='%m~%d~%Y'),
'dates': Datetime},
time_index='ymd_special')
assert dt.time_index == 'ymd_special'
assert dt['dates'].logical_type == Datetime
assert isinstance(dt['ymd_special'].logical_type, Datetime)
assert isinstance(dt['mdy_special'].logical_type, Datetime)
dt = dt.set_time_index('mdy_special')
assert dt.time_index == 'mdy_special'
df = pd.DataFrame({
'mdy_special': pd.Series(['3&11&2000', '3&12&2000', '3&13&2000'], dtype='string'),
})
dt = DataTable(df)
dt = dt.set_types(logical_types={'mdy_special': Datetime(datetime_format='%m&%d&%Y')})
dt.time_index = 'mdy_special'
assert isinstance(dt['mdy_special'].logical_type, Datetime)
assert dt.time_index == 'mdy_special'
def test_natural_language_inference_with_config_options():
dataframe = pd.DataFrame({
'index': [0, 1, 2],
'values': ["0123456", "01234567", "012345"]
})
ww.config.set_option('natural_language_threshold', 5)
dt = DataTable(dataframe, name='dt_name')
assert dt.columns['values'].logical_type == NaturalLanguage
ww.config.reset_option('natural_language_threshold')
def test_describe_dict(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_dict = dt.describe_dict()
index_order = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
    stats_dict_to_df = pd.DataFrame(stats_dict)
"""Applies the Bollinger stategory on a collection of closing prices."""
import numpy as np
import pandas as pd
try:
from .strategy_base import Strategy
except ImportError:
from strategy_base import Strategy
class Bollinger(Strategy):
"""Applies the Bollinger stategory onto a collection of closing prices."""
def apply_indicator(
self,
npCloses: np.array,
config: dict,
coinsOwned: bool,
) -> dict:
period = config['period']
# Edgecase
if len(npCloses) < period + 1:
return {
'results': {
'Bollinger Low': '',
'Bollinger High': '',
'Bollinger Decision': 0
},
'decision': 0
}
# Convert the closing prices into a Pandas dataframe and add columns
# with certain statistical information that will help calculate the
# Bollinger value.
npCloses = npCloses[-period - 1:]
        df = pd.DataFrame(npCloses, columns=['Close'])
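        # Illustrative sketch of a typical continuation (the original
        # implementation is not shown here): Bollinger bands are usually the
        # rolling mean plus/minus two rolling standard deviations.
        df['SMA'] = df['Close'].rolling(window=period).mean()
        df['STD'] = df['Close'].rolling(window=period).std()
        df['Upper'] = df['SMA'] + 2 * df['STD']
        df['Lower'] = df['SMA'] - 2 * df['STD']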
import pandas as pd
import numpy as np
import json
from simpletransformers.classification import ClassificationModel, ClassificationArgs
import sklearn
from sklearn.model_selection import train_test_split
import torch
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os
from tqdm import tqdm
np.random.seed(2)
# import the data
# tweets = pd.read_csv('data/postIR_final.csv')
# os.chdir('..')
tweets = pd.read_csv('D:/Dropbox/Twitter/training_data/training_final.csv', encoding='latin1')
# restrict to tweets with coding
tweets = tweets[tweets['uncivil_final'].isin([0,1])]
# subset to just text and labels, fix columns names
tweets = tweets.loc[:, ['text', 'uncivil_final']]
tweets.columns = ['text', 'labels']
# import other batch
mike = pd.read_excel(r'D:\Dropbox\wandering-pole\wanderingpole\data\new_pull_Michael.xls')
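# Illustrative sketch (not the original training code): a minimal
# simpletransformers fine-tuning run on the labelled tweets; the model choice
# and hyperparameters are assumptions.
def _train_sketch(labelled_tweets):
    train_df, eval_df = train_test_split(labelled_tweets, test_size=0.2, random_state=2)
    model_args = ClassificationArgs(num_train_epochs=1, overwrite_output_dir=True)
    model = ClassificationModel('roberta', 'roberta-base', args=model_args,
                                use_cuda=torch.cuda.is_available())
    model.train_model(train_df)
    result, model_outputs, wrong_predictions = model.eval_model(eval_df)
    return result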
# coding=utf-8
import numpy as np
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData):
def setup_method(self, method):
self.N = N = 50
self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=self.rng)
def test_basic(self):
df = self.df.copy()
df.loc[15:30, 'A'] = np.nan
dates = date_range('1/1/1990', periods=self.N * 3,
freq='25s')
result = df.asof(dates)
assert result.notnull().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notnull().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(1).all()
def test_subset(self):
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
df.loc[4:8, 'A'] = np.nan
dates = date_range('1/1/1990', periods=N * 3,
freq='25s')
# with a subset of A should be the same
result = df.asof(dates, subset='A')
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=['A', 'B'])
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# B gives self.df.asof
result = df.asof(dates, subset='B')
expected = df.resample('25s', closed='right').ffill().reindex(dates)
expected.iloc[20:] = 9
tm.assert_frame_equal(result, expected)
def test_missing(self):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
result = df.asof('1989-12-31')
        expected = Series(index=['A', 'B'], name=Timestamp('1989-12-31'))
#!/usr/bin/python3
# # Compute the distance between traces captured on the same channel. A large difference signals probable changes in channel usage, e.g. channel sharing by different emitters
# Import standard libraries
import pandas as pd
import numpy as np
import math
import h5py
from matrixprofile import *
from matrixprofile.discords import discords
import matplotlib.pyplot as plt
# Import specific libraries used by the cortex system
import h5_spectrum as H5
import cortex_names as cn
import cortex_lib as cl
# TODO: This could be improved by testing the similarity before merging, allowing separation of co-channel emissions
def _main():
# open file and get a list of the channel spectrogram groups. Uses H5py for better efficiency
data_store_file = h5py.File(cn.FOLDER_TO_STORE_FILES+'/'+cn.DATA_FILENAME)
channel_spectrogram_list = list(data_store_file[H5.CHANNEL_DATA_GROUP].keys())
data_store_file.close()
# create array with bin edges to be used on the histogram
profile_histogram_bins = np.arange(1, 8, 0.05)
numpy_histogram_bins = np.r_[-np.inf, profile_histogram_bins, np.inf]
# create empty dataframe to store results
channel_distances = pd.DataFrame()
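# Hedged sketch (not part of the original script): illustrates how one channel's distance
# profile could be summarised with the histogram bins defined above before being stored.
# 'example_profile' is synthetic placeholder data, not output from the cortex system.
example_profile = np.random.default_rng(0).uniform(1, 8, 500)
counts, _ = np.histogram(example_profile, bins=numpy_histogram_bins)
channel_distances['example_channel'] = counts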
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 10:08:35 2020
@author: suyu
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import mean_squared_error,roc_auc_score,mean_absolute_error,log_loss
from sklearn.preprocessing import MinMaxScaler
import sys
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
sys.path.append('../')
from gammli.DataReader import data_initialize
sys.path.append('benchmark/deepfm' )
from fmDataReader import FeatureDictionary, DataParser
from DeepFM import DeepFM
def deepfm_fm(wc, data, meta_info_ori, task_type="Regression", random_state=0, params=None):
train, test = train_test_split(data, test_size=0.2, random_state=0)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=True)
epochs = params['epochs']
loss_type = params['loss_type']
eval_metric = params['eval_metric']
greater_is_better = params['greater_is_better']
verbose = params['verbose']
early_stopping = params['early_stopping']
NUMERIC_COLS = []
CATEGORICAL_COLS = []
IGNORE_COLS = []
for i, (key, item) in enumerate(meta_info.items()):
if item['type'] == "categorical":
if len(meta_info[key]['values']) ==2:
NUMERIC_COLS.append(key)
else:
CATEGORICAL_COLS.append(key)
elif item['type'] == "target":
IGNORE_COLS.append(key)
else:
NUMERIC_COLS.append(key)
# params
dfm_params = {
"embedding_size": 3,
"deep_layers": [32, 32],
"use_deep" : True ,
"use_fm" : True ,
"deep_layers_activation": tf.nn.relu,
"loss_type" : loss_type,
"epoch": epochs ,
"batch_size": 1000,
"learning_rate": 0.001,
"optimizer_type": "adam",
"batch_norm": 0,
"batch_norm_decay": 0.995,
"l2_reg": 0.1,
"greater_is_better" : greater_is_better,
"verbose": verbose,
"eval_metric": eval_metric,
"random_seed": random_state
}
def _run_base_model_dfm(dfTrain, dfTest, folds, dfm_params):
fd = FeatureDictionary(dfTrain=dfTrain, dfTest=dfTest,
numeric_cols=NUMERIC_COLS,
ignore_cols=IGNORE_COLS)
data_parser = DataParser(feat_dict=fd)
Xi_train, Xv_train, y_train = data_parser.parse(df=dfTrain, has_label=True)
Xi_test, Xv_test, ids_test,idv_test = data_parser.parse(df=dfTest)
dfm_params["feature_size"] = fd.feat_dim
#print(fd.feat_dict)
dfm_params["field_size"] = len(Xi_train[0])
print(dfm_params)
y_train_meta = np.zeros((dfTrain.shape[0], 1), dtype=float)
y_test_meta = np.zeros((dfTest.shape[0], 1), dtype=float)
_get = lambda x, l: [x[i] for i in l]
#gini_results_cv = np.zeros(len(folds), dtype=float)
#gini_results_epoch_train = np.zeros((len(folds), dfm_params["epoch"]), dtype=float)
#gini_results_epoch_valid = np.zeros((len(folds), dfm_params["epoch"]), dtype=float)
y_train = list(map(float,y_train))
for i, (train_idx, valid_idx) in enumerate(folds):
Xi_train_, Xv_train_, y_train_ = _get(Xi_train, train_idx), _get(Xv_train, train_idx), _get(y_train, train_idx)
Xi_valid_, Xv_valid_, y_valid_ = _get(Xi_train, valid_idx), _get(Xv_train, valid_idx), _get(y_train, valid_idx)
dfm = DeepFM(**dfm_params)
dfm.fit(Xi_train_, Xv_train_, y_train_, Xi_valid_, Xv_valid_, y_valid_,early_stopping=early_stopping)
y_train_meta[valid_idx,0] = dfm.predict(Xi_valid_, Xv_valid_)
y_test_meta[:,0] += dfm.predict(Xi_test, Xv_test)
#gini_results_cv[i] = mean_absolute_error(y_valid_, y_train_meta[valid_idx])
#gini_results_epoch_train[i] = dfm.train_result
#gini_results_epoch_valid[i] = dfm.valid_result
y_test_meta /= float(len(folds))
return y_train_meta, y_test_meta
if task_type == "Regression":
cold_mae = []
cold_rmse = []
warm_mae = []
warm_rmse = []
def model_choose(deep):
dfm_params['use_deep']=deep
for times in range(10):
train, test = train_test_split(data, test_size=0.2, random_state=times)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)
train_x = np.concatenate([tr_x,val_x],0)
train_y = np.concatenate([tr_y,val_y],0)
train_y = sy.inverse_transform(train_y)
te_y = sy_t.inverse_transform(te_y)
train_Xi = np.concatenate([tr_Xi,val_Xi],0)
train_ = np.concatenate([train_x,train_Xi,train_y],1)
test_ = np.concatenate([te_x,te_Xi,te_y],1)
dfTrain = pd.DataFrame(train_,columns=train.columns)
dfTest = pd.DataFrame(test_,columns=test.columns)
dfTrain = train
dfTest = test
#dfTrain.user_id = dfTrain.user_id.astype(int).astype(str)
#dfTrain.item_id = dfTrain.item_id.astype(int).astype(str)
#dfTrain.target = dfTrain.target.astype(str)
#posi = dfTrain.shape[1]-3
#dfTrain.iloc[:,np.r_[:posi,-1]].astype(float)
#dfTest.iloc[:,np.r_[:posi,-1]].astype(float)
folds = list(KFold(n_splits=3, shuffle=True,
random_state=random_state).split(dfTrain.iloc[:,:-1].values, dfTrain.target.values))
y_train_dfm, y_test_dfm = _run_base_model_dfm(dfTrain, dfTest, folds, dfm_params)
#y_test_dfm = sy.inverse_transform(y_test_dfm.reshape(-1,1))
#te_y = sy_t.inverse_transform(te_y.reshape(-1,1))
if wc == 'warm':
if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:
warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]
warm_pred = y_test_dfm[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]
else:
warm_y = te_y
warm_pred= y_test_dfm
warm_mae.append(mean_absolute_error(warm_y,warm_pred))
warm_rmse.append(mean_squared_error(warm_y,warm_pred)**0.5)
if wc == 'cold':
try:
[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]
print('no cold samples')
continue
except:
cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]
cold_pred = y_test_dfm[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]
cold_mae.append(mean_absolute_error(cold_y,cold_pred))
cold_rmse.append(mean_squared_error(cold_y,cold_pred)**0.5)
if deep==True:
test_model = 'deepfm'
else:
test_model = 'fm'
if wc == 'warm':
i_result = np.array([test_model,np.mean(warm_mae),np.mean(warm_rmse),np.std(warm_mae),np.std(warm_rmse)]).reshape(1,-1)
result = pd.DataFrame(i_result,columns=['model','warm_mae','warm_rmse','std_warm_mae','std_warm_rmse'])
if wc == 'cold':
i_result = np.array([test_model,np.mean(cold_mae),np.mean(cold_rmse),np.std(cold_mae),np.std(cold_rmse)]).reshape(1,-1)
result = pd.DataFrame(i_result,columns=['model','cold_mae','cold_rmse','std_cold_mae','std_cold_rmse'])
return result
result_1 = (model_choose(True))
result_2 = (model_choose(False))
return result_1, result_2
if task_type == "Classification":
cold_auc = []
cold_logloss = []
warm_auc = []
warm_logloss = []
def model_choose(deep):
dfm_params['use_deep']=deep
for times in range(10):
train, test = train_test_split(data, test_size=0.2, random_state=times)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)
train_x = np.concatenate([tr_x,val_x],0)
train_y = np.concatenate([tr_y,val_y],0)
train_Xi = np.concatenate([tr_Xi,val_Xi],0)
train_ = np.concatenate([train_x,train_Xi,train_y],1)
test_ = np.concatenate([te_x,te_Xi,te_y],1)
dfTrain = pd.DataFrame(train_,columns=train.columns)
dfTest = pd.DataFrame(test_,columns=test.columns)
dfTrain = dfTrain.astype(str)
dfTest = dfTest.astype(str)
dfTrain = train
dfTest = test
#posi = dfTrain.shape[1]-3
#dfTrain.iloc[:,np.r_[:posi,-1]].astype(float)
#dfTest.iloc[:,np.r_[:posi,-1]].astype(float)
folds = list(KFold(n_splits=3, shuffle=True,
random_state=random_state).split(train_x, train_y))
y_train_dfm, y_test_dfm = _run_base_model_dfm(dfTrain, dfTest, folds, dfm_params)
if wc == 'warm':
if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:
warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]
warm_pred = y_test_dfm[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]
else:
warm_y = te_y
warm_pred= y_test_dfm
warm_auc.append(roc_auc_score(warm_y,warm_pred))
warm_logloss.append(log_loss(warm_y,warm_pred))
if wc == 'cold':
try:
[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]
print('no cold samples')
continue
except:
cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]
cold_pred = y_test_dfm[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]
cold_auc.append(roc_auc_score(cold_y,cold_pred))
cold_logloss.append(log_loss(cold_y,cold_pred))
if deep==True:
test_model = 'deepfm'
else:
test_model = 'fm'
if wc == 'warm':
i_result = np.array([test_model,np.mean(warm_auc),np.mean(warm_logloss),np.std(warm_auc),np.std(warm_logloss)]).reshape(1,-1)
                result = pd.DataFrame(i_result,columns=['model','warm_auc','warm_logloss','std_warm_auc','std_warm_logloss'])
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from collections import OrderedDict
from functools import partial
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from storefact import get_store_from_url
from plateau.core.dataset import DatasetMetadata
from plateau.core.uuid import gen_uuid
from plateau.io.eager import read_table
from plateau.io_components.metapartition import MetaPartition
from plateau.serialization import DataFrameSerializer
class NoPickle:
def __getstate__(self):
raise RuntimeError("do NOT pickle this object!")
def mark_nopickle(obj):
setattr(obj, "_nopickle", NoPickle())
def no_pickle_store(url):
store = get_store_from_url(url)
mark_nopickle(store)
return store
def no_pickle_factory(url):
return partial(no_pickle_store, url)
@pytest.fixture(params=["URL", "KeyValue", "Callable"])
def store_input_types(request, tmpdir):
url = f"hfs://{tmpdir}"
if request.param == "URL":
return url
elif request.param == "KeyValue":
return get_store_from_url(url)
elif request.param == "Callable":
return no_pickle_factory(url)
else:
raise RuntimeError(f"Encountered unknown store type {type(request.param)}")
def test_store_input_types(store_input_types, bound_store_dataframes):
from plateau.serialization.testing import get_dataframe_not_nested
dataset_uuid = "dataset_uuid"
df = get_dataframe_not_nested(10)
assert bound_store_dataframes(
[df],
dataset_uuid=dataset_uuid,
store=store_input_types,
partition_on=[df.columns[0]],
secondary_indices=[df.columns[1]],
)
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
assert len(store.keys()) == 4
assert "dataset_uuid/table/_common_metadata" in store
assert "dataset_uuid.by-dataset-metadata.json" in store
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
actual_keys = set(store.keys())
assert len(actual_keys) == 14 # one per partition + json + schema
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
counter = 0
for k in store.keys():
if "parquet" in k and "indices" not in k:
counter += 1
df_stored = DataFrameSerializer.restore_dataframe(key=k, store=store)
pdt.assert_frame_equal(df, df_stored)
assert counter == 2
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
assert df_empty.empty
df_list = [df_empty]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 1
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=next(iter(dataset.partitions.values())).files["table"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
# TODO: Kick this out?
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [[df, df2]]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices="P",
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
assert "P" in dataset.indices
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 1
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
    # First partition is empty, test this edge case
input_ = [df.head(0), df]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
secondary_indices="location",
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
                    {
                        "P": pd.Series([1], dtype=np.int64)
import numpy as np
import pandas as pd
from sklearn.base import clone
class MetaCost:
"""A procedure for making error-based classifiers cost-sensitive
Adapted from https://github.com/Treers/MetaCost/blob/master/MetaCost.py
.. note:: The form of the cost matrix C must be as follows:
+---------------+----------+----------+----------+
| actual class | | | |
+ | | | |
| + | y(x)=j_1 | y(x)=j_2 | y(x)=j_3 |
| + | | | |
| + | | | |
|predicted class| | | |
+---------------+----------+----------+----------+
| h(x)=j_1 | 0 | a | b |
| h(x)=j_2 | c | 0 | d |
| h(x)=j_3 | e | f | 0 |
+---------------+----------+----------+----------+
| C = np.array([[0, a, b],[c, 0 , d],[e, f, 0]]) |
+------------------------------------------------+
"""
def __init__(self, estimator, cost_matrix, n_estimators=50, n_samples=None, p=True, q=True):
"""
Parameters
----------
estimator :
An sklearn classifier
cost_matrix :
The cost matrix
n_estimators :
The number of estimators in the ensemble
n_samples :
The number of samples to train each estimator
p :
Is True if the estimator produces class probabilities. False otherwise
q :
True if all samples are to be used for each example
"""
self.estimator = estimator
self.cost_matrix = cost_matrix
self.n_estimators = n_estimators
        self.n_samples = n_samples
self.p = p
self.q = q
def fit(self, X, y):
"""
Parameters
----------
X :
Training set
y :
Target
"""
if not isinstance(X, pd.DataFrame):
            raise ValueError('X must be a pandas DataFrame object')
X = X.copy()
# reset index, helps with resampling
X.reset_index(inplace=True, drop=True)
y.index = X.index
variables = list(X.columns)
# concatenate
S = pd.concat([X,y], axis=1)
S.columns = variables + ['target']
num_class = y.nunique()
if not self.n_samples:
self.n_samples = len(X)
S_ = {} # list of subdatasets
M = [] # list of models
print('resampling data and training ensemble')
for i in range(self.n_estimators):
# Let S_[i] be a resample of S with self.n examples
S_[i] = S.sample(n=self.n_samples, replace=True)
X = S_[i][variables].values
y = S_[i]['target'].values
# Let M[i] = model produced by applying L to S_[i]
model = clone(self.estimator)
M.append(model.fit(X, y))
print('Finished training ensemble')
label = []
S_array = S[variables].values
# for each observation
print('evaluating optimal class per observation')
for i in range(len(S)):
if self.q:
# consider the predictions of all models
M_ = M
else:
# consider the predictions of models which were not train on
# this particular observation
k_th = [k for k, v in S_.items() if i not in v.index]
M_ = list(np.array(M)[k_th])
if self.p:
P_j = [model.predict_proba(S_array[[i]]) for model in M_]
else:
P_j = []
vector = [0] * num_class
for model in M_:
vector[model.predict(S_array[[i]])] = 1
P_j.append(vector)
# Calculate P(j|x)
# the average probability of each class, when combining all models
P = np.array(np.mean(P_j, 0)).T
# Relabel:
label.append(np.argmin(self.cost_matrix.dot(P)))
print('Finished re-assigning labels')
# Model produced by applying L to S with relabeled y
print('Training model on new data')
X_train = S[variables].values
y_train = np.array(label)
self.estimator.fit(X_train, y_train)
print('Finished training model on data with new labels')
        self.y_ = pd.Series(label)
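# Hedged usage sketch (not part of the original module): applying MetaCost with a 2x2 cost
# matrix in which false negatives are five times as costly as false positives. The synthetic
# data and the LogisticRegression base learner are illustrative assumptions only.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.normal(size=(200, 3)), columns=['f1', 'f2', 'f3'])
    y_demo = pd.Series((X_demo['f1'] + rng.normal(scale=0.5, size=200) > 0).astype(int))
    C = np.array([[0, 1],
                  [5, 0]])
    mc = MetaCost(LogisticRegression(), C, n_estimators=10)
    mc.fit(X_demo, y_demo)
    print(mc.y_.value_counts())                       # relabelled targets
    print(mc.estimator.predict(X_demo.values)[:10])   # cost-sensitive predictions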
import requests
import pandas as pd
import util_functions as uf
import geopandas as gpd
from shapely.geometry import Point, Polygon
# Set environmental variable
uf.set_env_path()
# Download DC Neighborhood Clusters Data From Open Data DC Website as JSON
dc_clusters_url = "https://opendata.arcgis.com/datasets/f6c703ebe2534fc3800609a07bad8f5b_17.geojson"
dc_clusters = requests.get(dc_clusters_url).json()
# Loop through each feature (cluster) in GeoJson and pull our metadata and polygon
feature_df_list = []
for enum, feature in enumerate(dc_clusters['features']):
# Pull out metadata
feature_df = pd.DataFrame(feature['properties'], index=[enum])
# Convert Polygon geometry to geodataframe
geometry_df = gpd.GeoDataFrame(feature['geometry'])
# Convert geometry to polygon and add back to metadata dataframe
feature_df['polygon'] = Polygon(geometry_df['coordinates'].iloc[0])
feature_df_list.append(feature_df)
# Combine each Cluster into master dataframe
dc_clusters_df = pd.concat(feature_df_list, axis=0)
'''Bring in station information, keeping only region_id = 42 (DC) and assign cluster to each '''
# Connect to AWS
conn, cur = uf.aws_connect()
# Query cabi_stations_temp - note that longitude should always come first to generate a geographic Point
stations_df = pd.read_sql("""SELECT lon, lat, name, short_name, name from cabi_stations_temp WHERE region_id = 42""", con=conn)
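# Hedged sketch (not part of the original script): one plausible way to finish the cluster
# assignment, matching each station's point against the cluster polygons built above.
# The 'point' and 'cluster_idx' column names and the helper function are illustrative.
stations_df['point'] = stations_df.apply(lambda row: Point(row['lon'], row['lat']), axis=1)
def _assign_cluster(point):
    matches = dc_clusters_df[dc_clusters_df['polygon'].apply(lambda poly: poly.contains(point))]
    return matches.index[0] if not matches.empty else None
stations_df['cluster_idx'] = stations_df['point'].apply(_assign_cluster)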
import pandas as pd
class RawReader:
"""
Reads and consumes raw data files (stored as raw.jsonl) from the Music Enabled Running project.
The state can be updated by feeding it additional lines (msg) from the data file.
You can then extract the data of the different sensors and modalities as Pandas dataframes.
"""
def __init__(self):
self.footpods = []
self.footpods_sc = []
self.phone_activity = []
self.phone_motion = []
self.music = []
self.phone_location = []
self.t_range = []
def update_with(self, msg):
"""
Updates the class state with a new message from the raw data file.
Parameters
----------
msg : dict
The dictionary representing the data message from the raw file.
Note, the raw JSON line must be first converted to a dict.
"""
if "t" in msg:
t = msg["t"]
self.t_range = [t, t] if self.t_range == [] else [self.t_range[0], t]
if msg["type"] == "iPhone-pedo":
self.phone_activity.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"activity": msg["pedo"]["activity"],
"speed": 0
if msg["pedo"]["pace"] == 0
else 1.0 / msg["pedo"]["pace"],
"step": msg["pedo"]["step"],
"cadence": msg["pedo"]["cadence"] * 60,
"floors_ascended": msg["pedo"]["floorsAscended"]
if "floorsAscended" in msg["pedo"]
else 0,
"floors_descended": msg["pedo"]["floorsDescended"]
if "floorsDescended" in msg["pedo"]
else 0,
}
)
if msg["type"] == "iPhone-motion":
gx = msg["motion"]["ag"][0] - msg["motion"]["a"][0]
gy = msg["motion"]["ag"][1] - msg["motion"]["a"][1]
gz = msg["motion"]["ag"][2] - msg["motion"]["a"][2]
a_vert = (
msg["motion"]["a"][0] * gx
+ msg["motion"]["a"][1] * gy
+ msg["motion"]["a"][2] * gz
)
self.phone_motion.append(
{
"t": pd.Timestamp(msg["t"], unit="s"),
"ax": msg["motion"]["a"][0],
"ay": msg["motion"]["a"][1],
"az": msg["motion"]["a"][2],
"gx": gx,
"gy": gy,
"gz": gz,
"a_vert": a_vert,
"rx": msg["motion"]["r"][0],
"ry": msg["motion"]["r"][1],
"rz": msg["motion"]["r"][2],
}
)
if msg["type"] == "iPhone-location":
self.phone_location.append(
                {
                    "t": pd.Timestamp(msg["location"]["timestamp"], unit="s")
# TODO move away from this test generator style, since it requires us to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
##############################################################
#                                                            #
#  THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py  #
#                                                            #
##############################################################
# TODO refactor this into table-driven tests using pytest parametrize, since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible
# (a hedged sketch of that refactor follows the imports below).
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
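# Hedged sketch (not part of the generated file): the table-driven refactor described in the
# TODO above could look roughly like this, with pytest.mark.parametrize supplying the case
# table. The three cases and the test name are illustrative assumptions.
import pytest


@pytest.mark.parametrize(
    "val_cols, key_cols, symb_ratio, aggs",
    [
        (1, 1, 0.1, ['median']),
        (4, 2, 0.30, ['median', 'min']),
        (7, 3, 0.1, ['var', 'mean', 'max']),
    ],
)
def test_groupby_agg_matches_pandas(val_cols, key_cols, symb_ratio, aggs):
    test_class = groupby_everything(val_cols, key_cols, symb_ratio, aggs)
    pd_out = (
        pd.DataFrame(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    sf_out = (
        rt.Dataset(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    for func in aggs:
        for i in range(0, test_class.val_columns):
            column = VAL_COLUMN_NAMES[i]
            for a, b in zip(pd_out[column][func], sf_out[func.title()][column]):
                if a == a and b == b:  # skip NaN pairs, mirroring safe_assert in the class below
                    assert b == pytest.approx(a, abs=1e-7)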
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
if a == a and b == b:
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
            pd.DataFrame(test_class.data)
            .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
            .agg(test_class.aggregation_functions)
        )
        sf_out = (
            rt.Dataset(test_class.data)
            .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
            .agg(test_class.aggregation_functions)
        )
        for func in aggs:
            for i in range(0, test_class.val_columns):
                column = VAL_COLUMN_NAMES[i]
                self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
'''Create figures for results of feature prediction'''
from __future__ import print_function
import os
import pickle
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sns
import bdpy.fig as bfig
from bdpy.util import makedir_ifnot
import god_config as config
# Main #################################################################
def main():
analysis_name = 'GenericObjectDecoding'
resnet_reindex = config.resnet_reindex
resnet_true_layers = config.resnet_true_layers
alexnet_file = os.path.join('results-alexnet', analysis_name + '.pkl')
resnet_file = os.path.join('results-resnet', analysis_name + '.pkl')
output_file_featpred = os.path.join('results', config.analysis_name + '_featureprediction.pdf')
# Load results -----------------------------------------------------
with open(alexnet_file, 'rb') as f:
print('Loading %s' % alexnet_file)
alexnet_results = pickle.load(f)
with open(resnet_file, 'rb') as f:
print('Loading %s' % resnet_file)
resnet_results = pickle.load(f)
# Figure settings
plt.rcParams['font.size'] = 7
# Plot (feature prediction) ----------------------------------------
fig, axes = plt.subplots(4,2,figsize=(8,9))
num_plots = range(8)
# Image
plotresults(fig, axes, alexnet_results, resnet_results, num_plots)
# Save the figure
makedir_ifnot('results')
plt.savefig(output_file_featpred, dpi=300)
print('Saved %s' % output_file_featpred)
plt.show()
# Functions ############################################################
def plotresults(fig, axes, alexnet_results, resnet_results, subplot_index):
'''Draw results of feature prediction'''
for feat_a, feat_b, si in zip(config.alexnet_features, config.resnet_features, subplot_index):
df_a = alexnet_results.loc[alexnet_results["feature"] == feat_a]
df_a = df_a.loc[:, ["feature", "roi", "mean_profile_correlation_image"]]
df_a = df_a.reset_index()
del df_a["index"]
df_b = resnet_results.loc[resnet_results["feature"] == feat_b]
df_b = df_b.loc[:, ["feature", "roi", "mean_profile_correlation_image"]]
df_b = df_b.reset_index()
del df_b["index"]
        df = pd.concat([df_a, df_b])
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
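# Illustrative usage sketch for archive_results (the file/model names below are hypothetical, not
# part of the original pipeline): the call writes the CSV under ../archive/<algo>/<Y-m-d-H-M>/ and
# copies the generating script next to it for reproducibility; note it also leaves the process
# chdir'ed into that archive folder, which the script resets just below.
#   archive_results('submission.csv', results=predictions_df, algo='xgboost', script='xgb_bench.py')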
os.chdir('/home/cuoco/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine=pd.read_csv('../features/procedure_vaccine.csv.gz')
procedure_vagi=pd.read_csv('../features/procedure_vagi.csv.gz')
procedure_plan_type=pd.read_csv('../features/procedure_plan_type.csv.gz')
rx_payment=pd.read_csv('../features/rx_payment.csv.gz')
train_pract_screen_ratio=pd.read_csv('../features/train_pract_screen_ratio.csv.gz')
test_pract_screen_ratio=pd.read_csv('../features/test_pract_screen_ratio.csv.gz')
visits=pd.read_csv('../features/visits.csv.gz')
train=pd.merge(train,surgical, on='patient_id',how='left')
test=pd.merge(test,surgical, on='patient_id',how='left')
print('after merging surgical')
print(train.shape,test.shape)
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test=pd.merge(test,diagnosis, on='patient_id',how='left')
print('after merging diagnosis')
print(train.shape,test.shape)
#train=pd.merge(train,procedure_cervi, on='patient_id',how='left')
#test=pd.merge(test,procedure_cervi, on='patient_id',how='left')
#train=pd.merge(train,procedure_hpv, on='patient_id',how='left')
#test=pd.merge(test,procedure_hpv, on='patient_id',how='left')
#train=pd.merge(train,procedure_vaccine, on='patient_id',how='left')
#test=pd.merge(test,procedure_vaccine, on='patient_id',how='left')
train=pd.merge(train,procedure_vagi, on='patient_id',how='left')
test=pd.merge(test,procedure_vagi, on='patient_id',how='left')
train=pd.merge(train,procedure_plan_type, on='patient_id',how='left')
test=pd.merge(test,procedure_plan_type, on='patient_id',how='left')
print('after merging procedure')
print(train.shape,test.shape)
train=pd.merge(train,rx_payment, on='patient_id',how='left')
test=pd.merge(test,rx_payment, on='patient_id',how='left')
print('after merging rx_payment')
print(train.shape,test.shape)
train=pd.merge(train,train_pract_screen_ratio, on='patient_id',how='left')
test=pd.merge(test,test_pract_screen_ratio, on='patient_id',how='left')
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import style
#changing local directory to csv file location
os.chdir('E:\\auto')
#reading prices of BSE Auto companies on 26/nov/2019 interval 1min
df=pd.read_csv('26novprice.csv')
# -*- coding: UTF-8 -*-
"""
collector.ths - collect Tonghuashun (10jqka) data
Official site: http://www.10jqka.com.cn
Data center: http://data.10jqka.com.cn/
====================================================================
"""
import requests
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from zb.crawlers.utils import get_header
import re
import pandas as pd
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 '
'(KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
def http_requests(url):
response = requests.get(url, headers=get_header())
if response.status_code == 200:
html = BeautifulSoup(response.text, 'lxml')
return html
else:
        raise ValueError("Response status code (%s) is not 200; failed to fetch url"
                         % str(response.status_code))
def zao_pan():
    """Fetch Tonghuashun's pre-market must-read briefing as one formatted string."""
url = "http://stock.10jqka.com.cn/zaopan/"
response = requests.get(url, headers=get_header())
html = BeautifulSoup(response.text, 'lxml')
    # Famous quote
wisdom = html.find('div', {'class': "select-content"}).text.strip()
    # Yesterday's close
yesterday = html.find('div', {'class': 'yestoday'}).text.strip()
yesterday = yesterday.replace("  ", "|")
    # Today's focus
content = html.find('div', {'class': "content-main-fl fl"}).text.strip()
content = re.sub('[ \u3000]', "\n", content)
res = [wisdom, yesterday, content]
return "\n\n".join(res)
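# Illustrative usage (needs live access to stock.10jqka.com.cn; the page wording changes daily):
#   print(zao_pan())  # quote, yesterday's close and today's focus, separated by blank lines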
# Plate (sector) data
# --------------------------------------------------------------------
def get_ths_plates():
    """Fetch the list of all Tonghuashun plates (sectors).
    :return: pd.DataFrame
        ['code', 'kind', 'name', 'url']
    """
plate_kinds = {
        'gn': "概念板块",  # concept plates
        'dy': "地域板块",  # regional plates
        'thshy': "同花顺行业",  # Tonghuashun industry plates
        'zjhhy': "证监会行业"  # CSRC industry plates
}
plates = []
for kind in plate_kinds.keys():
url = "http://q.10jqka.com.cn/%s/" % kind
response = requests.get(url, headers=headers)
html = BeautifulSoup(response.text, "lxml")
results = html.find("div", {"class": "category boxShadow m_links"}).find_all("a")
for a in results:
url = a['href']
plate = {
"name": a.text,
"code": url.strip("/").split('/')[-1],
"url": url,
"kind": plate_kinds[kind]
}
plates.append(plate)
return pd.DataFrame(plates)
def get_plate_fund_flow(kind):
    """Fetch the latest industry/concept fund flows from Tonghuashun.
    :param kind: str
        Type of fund flow to fetch; valid values are ['hyzjl', 'gnzjl']
        hyzjl - industry fund flow
        gnzjl - concept fund flow
    :return: pd.DataFrame (column headers keep the site's original Chinese labels)
        ['序号', '行业', '行业指数', '涨跌幅', '流入资金(亿)', '流出资金(亿)',
        '净额(亿)', '公司家数', '领涨股', '涨跌幅', '当前价(元)']
"""
if kind not in ['hyzjl', 'gnzjl']:
        raise ValueError("kind must be one of ['hyzjl', 'gnzjl']")
url_template = "http://data.10jqka.com.cn/funds/{kind}/field/" \
"tradezdf/order/desc/page/{page}/ajax/1/"
i = 1
results = []
session = HTMLSession()
while 1:
url = url_template.format(page=i, kind=kind)
response = session.get(url, headers=get_header())
response.html.render()
html = BeautifulSoup(response.html.text, 'lxml')
# table = html.find('table', {'class': "m-table J-ajax-table"}).text.strip()
table = html.text.strip()
cells = table.split("\n")[:-2]
total_pages = int(table.split("\n")[-2].split("/")[1])
col_nums = 11
row_nums = int(len(cells) / col_nums)
col_names = cells[0: 11]
for x in range(1, row_nums):
results.append(cells[x * col_nums: (x + 1) * col_nums])
        # stop once the last page has been fetched
if i >= total_pages:
break
else:
i += 1
    return pd.DataFrame(results, columns=col_names)
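# Minimal usage sketch, assuming network access to 10jqka and a Chromium setup usable by
# requests-html's render(); the variable names below are only illustrative:
if __name__ == '__main__':
    plates = get_ths_plates()                     # every plate with its code, kind, name and url
    industry_flow = get_plate_fund_flow('hyzjl')  # latest industry fund-flow table
    print(plates.head())
    print(industry_flow.head())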
import csv
import pandas as pd
from datetime import datetime, timedelta
from random import randrange
from nemwriter import NEM12
TEST_FILES = [
"examples/actual_interval.csv",
"examples/multiple_quality.csv",
]
def test_dataframe_export():
"""Create export from dataframe"""
num_intervals = 288
index = [
datetime(2004, 4, 1) + timedelta(minutes=5 * x)
for x in range(1, num_intervals + 1)
]
e1 = [randrange(1, 10) for x in range(1, num_intervals + 1)]
e2 = [randrange(1, 5) for x in range(1, num_intervals + 1)]
    s1 = pd.Series(data=e1, index=index, name="E1")
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import numpy as np
import pandas as pd
import torch
import argparse
from geomloss import SamplesLoss
def cov_mat(x):
"""Compute the (dim, dim) sample cov matrix of a Tensor of shape (n, dim)"""
return np.cov(x.T)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='Plotting metrics.')
parser.add_argument('--exp', type=str, help='Experiment to run')
parser.add_argument('--root', type=str, default="res", help='Root dir for results')
parser.add_argument('--nparticles', type=int, default=100, help='Num of particles')
parser.add_argument('--dim', type=int, default=100, help='Num of particles')
parser.add_argument('--epochs', type=int, default=1000, help='Num of epochs')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--lr_g', type=float, default=0.01, help='learning rate for g')
parser.add_argument('--delta', type=float, help='stepsize for projections')
parser.add_argument('--noise', type=str, default="True", help='noise')
parser.add_argument('--format', type=str, default="png", help='format of figs')
args = parser.parse_args()
dim = args.dim
nparticles = args.nparticles
lr = args.lr
noise = "_noise" if args.noise=="True" else ""
basedir = f"{args.root}/{args.exp}"
resdir = f"rbf_epoch{args.epochs}_lr{lr}_delta{args.delta}_n{nparticles}"
resdir_svgd = f"rbf_epoch{args.epochs}_lr0.1_delta0.1_n{nparticles}"
resdir_ssvgd = f"rbf_epoch{args.epochs}_lr0.1_delta0.1_n{nparticles}"
resdir_hmc = resdir
eff_dims = [1, 2, 5, 10, 20, 30, 40, 50] # projector ranks to show
seeds = range(9)
if __name__ == "__main__":
df_list = []
for seed in seeds:
path = f"{basedir}/{resdir}/seed{seed}"
path_svgd = f"{basedir}/{resdir_svgd}/seed{seed}"
path_ssvgd = f"{basedir}/{resdir_ssvgd}/seed{seed}"
path_hmc = f"{basedir}/{resdir_hmc}/seed{seed}"
# load results
svgd_res = pickle.load(open(f"{path_svgd}/particles_svgd.p", "rb"))
ssvgd_res = pickle.load(open(f"{path_ssvgd}/particles_s-svgd_lrg{args.lr_g}.p", "rb"))
hmc_res = pickle.load(open(f"{path_hmc}/particles_hmc.p", "rb"))
particles_hmc = hmc_res["particles"].cpu()
cov_hmc = cov_mat(particles_hmc)
method_ls = [svgd_res, ssvgd_res]
method_names = ["SVGD", "S-SVGD"]
for eff_dim in eff_dims:
gsvgd_res = pickle.load(open(f"{path}/particles_gsvgd_effdim{eff_dim}.p", "rb"))
method_ls.append(gsvgd_res)
method_names.append(f"GSVGD{eff_dim}")
# load target distribution
target_dist = torch.load(f"{path}/target_dist.p", map_location=device)
data = torch.load(f'{path}/data.p', map_location=device)
_, _, acc_hmc, _ = target_dist.evaluation(particles_hmc, data["X_test"].cpu(), data["y_test"].cpu())
## plot solutions
subplot_c = 3
subplot_r = int(np.ceil(len(method_ls) / subplot_c))
for i, (res, method_name) in enumerate(zip(method_ls, method_names)):
print("Loading", method_name)
particles = res["particles"][-1][1].cpu()
_, _, acc, _ = target_dist.evaluation(particles, data["X_test"].cpu(), data["y_test"].cpu())
# get distance for var
# energy
energy = SamplesLoss("energy")
energy_dist = energy(particles_hmc, particles).item()
# cov matrix
cov_matrix = cov_mat(particles)
l2_dist = np.sqrt(np.sum((cov_matrix - cov_hmc)**2))
l2_diag_dist = np.sqrt(np.sum(np.diag(cov_matrix - cov_hmc)**2))
if not "GSVGD" in method_name:
rep = len(eff_dims)
eff_dim_plot = eff_dims
else:
rep = 1
eff_dim_plot = [int(method_name.split("GSVGD")[-1])]
method_name = "GSVGD"
df_new = pd.DataFrame({
"Energy Distance": [energy_dist] * rep,
"Covariance Error": [l2_dist] * rep,
"l2_diag_dist": [l2_diag_dist] * rep,
"method": [method_name] * rep,
"eff_dim": eff_dim_plot,
"seed": [seed] * rep,
})
df_list.append(df_new)
# gather data into dataframe
    df = pd.concat(df_list)
"""
Utilities, accessible via subcommands.
"""
import datetime
import itertools
import json
import os
import re
import shutil
import sys
import click
import numpy as np
import pandas as pd
import vampire.common as common
from vampire import preprocess_adaptive
from vampire.gene_name_conversion import convert_gene_names
from vampire.gene_name_conversion import olga_to_adaptive_dict
from sklearn.model_selection import train_test_split
@click.group()
def cli():
pass
@cli.command()
@click.option('--idx', required=True, help='The row index for the summary output.')
@click.option('--idx-name', required=True, help='The row index name.')
@click.argument('seq_path', type=click.Path(exists=True))
@click.argument('pvae_path', type=click.Path(exists=True))
@click.argument('ppost_path', type=click.Path(exists=True))
@click.argument('out_path', type=click.Path(writable=True))
def merge_ps(idx, idx_name, seq_path, pvae_path, ppost_path, out_path):
"""
Merge probability estimates from Pvae and Ppost into a single data frame and write to an output CSV.
SEQ_PATH should be a path to sequences in canonical CSV format, with
sequences in the same order as PVAE_PATH.
"""
def prep_index(df):
df.set_index(['amino_acid', 'v_gene', 'j_gene'], inplace=True)
df.sort_index(inplace=True)
pvae_df = pd.read_csv(seq_path)
pvae_df['log_Pvae'] = pd.read_csv(pvae_path)['log_p_x']
prep_index(pvae_df)
ppost_df = convert_gene_names(pd.read_csv(ppost_path), olga_to_adaptive_dict())
prep_index(ppost_df)
# If we don't drop duplicates then merge will expand the number of rows.
# See https://stackoverflow.com/questions/39019591/duplicated-rows-when-merging-dataframes-in-python
# We deduplicate Ppost, which is guaranteed to be identical among repeated elements.
merged = pd.merge(pvae_df, ppost_df.drop_duplicates(), how='left', left_index=True, right_index=True)
merged['log_Ppost'] = np.log(merged['Ppost'])
merged.reset_index(inplace=True)
merged[idx_name] = idx
merged.set_index(idx_name, inplace=True)
merged.to_csv(out_path)
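# Hypothetical CLI invocation (the actual entry-point name depends on how this module is exposed):
#   <cli> merge_ps --idx run1 --idx-name sample seqs.csv pvae.csv ppost.csv merged.csv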
@cli.command()
@click.option('--train-size', default=1000, help="Data count to use for train.")
@click.argument('in_csv', type=click.File('r'))
@click.argument('out1_csv', type=click.File('w'))
@click.argument('out2_csv', type=click.File('w'))
def split(train_size, in_csv, out1_csv, out2_csv):
"""
Do a train/test split.
"""
df = pd.read_csv(in_csv)
(df1, df2) = train_test_split(df, train_size=train_size)
df1.to_csv(out1_csv, index=False)
df2.to_csv(out2_csv, index=False)
@cli.command()
@click.option('--out', type=click.File('w'), help='Output file path.', required=True)
@click.option('--idx', required=True, help='The row index for the summary output.')
@click.option('--idx-name', required=True, help='The row index name.')
@click.option(
'--colnames', default='', help='Comma-separated column identifier names corresponding to the files that follow.')
@click.argument('in_paths', nargs=-1)
def summarize(out, idx, idx_name, colnames, in_paths):
"""
Summarize results of a run as a single-row CSV. The input is of flexible
length: each input file is associated with an identifier specified using
the --colnames flag.
"""
colnames = colnames.split(',')
if len(colnames) != len(in_paths):
raise Exception("The number of colnames is not equal to the number of input files.")
input_d = {k: v for k, v in zip(colnames, in_paths)}
index = pd.Index([idx], name=idx_name)
if 'loss' in input_d:
        loss_df = pd.read_csv(input_d['loss'], index_col=0)
"""Classes for report generation and add-ons."""
import os
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from jinja2 import FileSystemLoader, Environment
from json2html import json2html
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, roc_curve, precision_recall_curve, \
average_precision_score, explained_variance_score, mean_absolute_error, \
mean_squared_error, median_absolute_error, r2_score, f1_score, precision_score, recall_score, confusion_matrix
from ..utils.logging import get_logger
logger = get_logger(__name__)
base_dir = os.path.dirname(__file__)
def extract_params(input_struct):
params = dict()
iterator = input_struct if isinstance(input_struct, dict) else input_struct.__dict__
for key in iterator:
if key.startswith(('_', 'autonlp_params')):
continue
value = iterator[key]
if type(value) in [bool, int, float, str]:
params[key] = value
elif value is None:
params[key] = None
elif hasattr(value, '__dict__') or isinstance(value, dict):
params[key] = extract_params(value)
else:
params[key] = str(type(value))
return params
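# Illustrative sketch of what extract_params produces (the class below is hypothetical):
#   class _Conf:
#       def __init__(self):
#           self.lr = 0.05            # scalar -> kept as-is
#           self._cache = object()    # leading underscore -> skipped
#           self.optim = {'n': 10}    # dict -> recursed into
#   extract_params(_Conf())  ->  {'lr': 0.05, 'optim': {'n': 10}}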
def plot_roc_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
fpr, tpr, _ = roc_curve(data['y_true'], data['y_pred'])
auc_score = roc_auc_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(fpr, tpr, color='blue', lw=lw, label='Trained model');
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([-0.05, 1.05]);
plt.xlabel('False Positive Rate');
plt.ylabel('True Positive Rate');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('ROC curve (GINI = {:.3f})'.format(2 * auc_score - 1));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
return auc_score
def plot_pr_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
precision, recall, _ = precision_recall_curve(data['y_true'], data['y_pred'])
ap_score = average_precision_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(recall, precision, color='blue', lw=lw, label='Trained model');
positive_rate = np.sum(data['y_true'] == 1) / data.shape[0]
plt.plot([0, 1], [positive_rate, positive_rate], \
color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([0.45, 1.05]);
plt.xlabel('Recall');
plt.ylabel('Precision');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('PR curve (AP = {:.3f})'.format(ap_score));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
def plot_preds_distribution_by_bins(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
box_plot_data = []
labels = []
for name, group in data.groupby('bin'):
labels.append(name)
box_plot_data.append(group['y_pred'].values)
box = axs.boxplot(box_plot_data, patch_artist=True, labels=labels)
for patch in box['boxes']:
patch.set_facecolor('green')
axs.set_yscale('log')
axs.set_xlabel('Bin number')
axs.set_ylabel('Prediction')
axs.set_title('Distribution of object predictions by bin')
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_distribution_of_logits(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
data['proba_logit'] = np.log(data['y_pred'].values / (1 - data['y_pred'].values))
sns.kdeplot(data[data['y_true'] == 0]['proba_logit'], shade=True, color="r", label='Class 0 logits', ax=axs)
sns.kdeplot(data[data['y_true'] == 1]['proba_logit'], shade=True, color="g", label='Class 1 logits', ax=axs)
axs.set_xlabel('Logits')
axs.set_ylabel('Density')
axs.set_title('Logits distribution of object predictions (by classes)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_pie_f1_metric(data, F1_thresh, path):
tn, fp, fn, tp = confusion_matrix(data['y_true'], (data['y_pred'] > F1_thresh).astype(int)).ravel()
(_, prec), (_, rec), (_, F1), (_, _) = precision_recall_fscore_support(data['y_true'],
(data['y_pred'] > F1_thresh).astype(int))
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect="equal"))
recipe = ["{} True Positives".format(tp),
"{} False Positives".format(fp),
"{} False Negatives".format(fn),
"{} True Negatives".format(tn)]
wedges, texts = ax.pie([tp, fp, fn, tn], wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-", color='k'),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1) / 2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax.annotate(recipe[i], xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
horizontalalignment=horizontalalignment, **kw)
ax.set_title(
"Trained model: Precision = {:.2f}%, Recall = {:.2f}%, F1-Score = {:.2f}%".format(prec * 100, rec * 100, F1 * 100))
plt.savefig(path, bbox_inches='tight');
plt.close()
return prec, rec, F1
def f1_score_w_co(data, min_co=.01, max_co=.99, step=0.01):
data['y_pred'] = np.clip(np.ceil(data['y_pred'].values / step) * step, min_co, max_co)
pos = data['y_true'].sum()
neg = data['y_true'].shape[0] - pos
grp = pd.DataFrame(data).groupby('y_pred')['y_true'].agg(['sum', 'count'])
grp.sort_index(inplace=True)
grp['fp'] = grp['sum'].cumsum()
grp['tp'] = pos - grp['fp']
grp['tn'] = (grp['count'] - grp['sum']).cumsum()
grp['fn'] = neg - grp['tn']
grp['pr'] = grp['tp'] / (grp['tp'] + grp['fp'])
grp['rec'] = grp['tp'] / (grp['tp'] + grp['fn'])
grp['f1_score'] = 2 * (grp['pr'] * grp['rec']) / (grp['pr'] + grp['rec'])
best_score = grp['f1_score'].max()
best_co = grp.index.values[grp['f1_score'] == best_score].mean()
# print((y_pred < best_co).mean())
return best_score, best_co
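# A minimal illustrative sketch on synthetic data (not part of the report flow):
# f1_score_w_co() bins the scores with the given step, accumulates the confusion
# counts per bin and returns the best F1 together with the cut-off achieving it.
# Note that it modifies data['y_pred'] in place (clipped, binned values).
def _sketch_f1_threshold_search():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=1000)
    y_pred = np.clip(0.5 * y_true + 0.5 * rng.rand(1000), 0.0, 1.0)
    toy = pd.DataFrame({'y_true': y_true, 'y_pred': y_pred})
    best_f1, best_cutoff = f1_score_w_co(toy)
    return best_f1, best_cutoff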
def get_bins_table(data):
bins_table = data.groupby('bin').agg({'y_true': [len, np.mean], \
'y_pred': [np.min, np.mean, np.max]}).reset_index()
bins_table.columns = ['Bin number', 'Amount of objects', 'Mean target', \
'Min probability', 'Average probability', 'Max probability']
return bins_table.to_html(index=False)
# Regression plots:
def plot_target_distribution_1(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(2, 1, figsize=(16, 20))
sns.kdeplot(data['y_true'], shade=True, color="g", ax=axs[0])
axs[0].set_xlabel('Target value')
axs[0].set_ylabel('Density')
axs[0].set_title('Target distribution (y_true)');
sns.kdeplot(data['y_pred'], shade=True, color="r", ax=axs[1])
axs[1].set_xlabel('Target value')
axs[1].set_ylabel('Density')
axs[1].set_title('Target distribution (y_pred)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution_2(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
sns.kdeplot(data['y_true'], shade=True, color="g", label="y_true", ax=axs)
sns.kdeplot(data['y_pred'], shade=True, color="r", label="y_pred", ax=axs)
axs.set_xlabel('Target value')
axs.set_ylabel('Density')
axs.set_title('Target distribution');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution(data, path):
data_pred = pd.DataFrame({'Target value': data['y_pred']})
data_pred['source'] = 'y_pred'
data_true = pd.DataFrame({'Target value': data['y_true']})
data_true['source'] = 'y_true'
data = pd.concat([data_pred, data_true], ignore_index=True)
sns.set(style="whitegrid", font_scale=1.5)
g = sns.displot(data, x="Target value", row="source", height=9, aspect=1.5, kde=True, color="m",
facet_kws=dict(margin_titles=True))
g.fig.suptitle("Target distribution")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_error_hist(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 10))
g = sns.kdeplot(data['y_pred'] - data['y_true'], shade=True, color="m", ax=ax)
ax.set_xlabel('Error = y_pred - y_true')
ax.set_ylabel('Density')
ax.set_title('Error histogram');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_reg_scatter(data, path):
sns.set(style="whitegrid", font_scale=1.5)
g = sns.jointplot(x="y_pred", y="y_true", data=data, \
kind="reg", truncate=False, color="m", \
height=14)
g.fig.suptitle("Scatter plot")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
# Multiclass plots:
def plot_confusion_matrix(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 12))
cmat = confusion_matrix(data['y_true'], data['y_pred'], normalize='true')
g = sns.heatmap(cmat, annot=True, linewidths=.5, cmap='Purples', ax=ax)
ax.set_xlabel('y_pred')
ax.set_ylabel('y_true')
ax.set_title('Confusion matrix');
fig.savefig(path, bbox_inches='tight');
plt.close()
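# A minimal usage sketch: every plotting helper above only needs a plain
# DataFrame with 'y_true' and 'y_pred' columns ('bin' is additionally required
# by the per-bin box plot), so they can also be called outside ReportDeco.
# The file name and the synthetic scores below are arbitrary.
def _sketch_standalone_roc_plot():
    rng = np.random.RandomState(42)
    y_true = rng.randint(0, 2, size=500)
    y_pred = np.clip(0.6 * y_true + 0.4 * rng.rand(500), 0.0, 1.0)
    scores = pd.DataFrame({'y_true': y_true, 'y_pred': y_pred})
    return plot_roc_curve_image(scores, path='standalone_roc_curve.png')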
class ReportDeco:
"""
Decorator to wrap :class:`~lightautoml.automl.base.AutoML` class to generate html report on ``fit_predict`` and ``predict``.
Example:
    >>> report_automl = ReportDeco(output_path='output_path', report_file_name='report_file_name')(automl)
>>> report_automl.fit_predict(train_data)
>>> report_automl.predict(test_data)
Report will be generated at output_path/report_file_name automatically.
Warning:
        Do not use it just for inference (if you do not need a report), because:
        - It needs the target variable to calculate performance metrics.
        - It takes additional time to generate the report.
        - A dump of the decorated automl takes more memory to store.
    To get the unwrapped fitted instance for pickling
    and inference, access the ``report_automl.model`` attribute.
"""
@property
def model(self):
"""Get unwrapped model.
Returns:
model.
"""
return self._model
@property
def mapping(self):
return self._model.reader.class_mapping
def __init__(self, *args, **kwargs):
"""
Note:
Valid kwargs are:
- output_path: Folder with report files.
- report_file_name: Name of main report file.
Args:
*args: Arguments.
**kwargs: Additional parameters.
"""
if not kwargs:
kwargs = {}
# self.task = kwargs.get('task', 'binary')
self.n_bins = kwargs.get('n_bins', 20)
self.template_path = kwargs.get('template_path', os.path.join(base_dir, 'lama_report_templates/'))
self.output_path = kwargs.get('output_path', 'lama_report/')
self.report_file_name = kwargs.get('report_file_name', 'lama_interactive_report.html')
if not os.path.exists(self.output_path):
os.makedirs(self.output_path, exist_ok=True)
self._base_template_path = 'lama_base_template.html'
self._model_section_path = 'model_section.html'
self._train_set_section_path = 'train_set_section.html'
self._results_section_path = 'results_section.html'
self._inference_section_path = {'binary': 'binary_inference_section.html', \
'reg': 'reg_inference_section.html', \
'multiclass': 'multiclass_inference_section.html'}
self.title = 'LAMA report'
self.sections_order = ['intro', 'model', 'train_set', 'results']
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self.generate_report()
def __call__(self, model):
self._model = model
# AutoML only
self.task = self._model.task._name # valid_task_names = ['binary', 'reg', 'multiclass']
        # add information to report
self._model_name = model.__class__.__name__
self._model_parameters = json2html.convert(extract_params(model))
self._model_summary = None
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self._n_test_sample = 0
self._generate_model_section()
self.generate_report()
return self
def _binary_classification_details(self, data):
self._inference_content['sample_bins_table'] = get_bins_table(data)
prec, rec, F1 = plot_pie_f1_metric(data, self._F1_thresh, \
path=os.path.join(self.output_path, self._inference_content['pie_f1_metric']))
auc_score = plot_roc_curve_image(data, path=os.path.join(self.output_path, self._inference_content['roc_curve']))
plot_pr_curve_image(data, path=os.path.join(self.output_path, self._inference_content['pr_curve']))
plot_preds_distribution_by_bins(data, path=os.path.join(self.output_path, \
self._inference_content['preds_distribution_by_bins']))
plot_distribution_of_logits(data, path=os.path.join(self.output_path, \
self._inference_content['distribution_of_logits']))
return auc_score, prec, rec, F1
def _regression_details(self, data):
# graphics
plot_target_distribution(data, path=os.path.join(self.output_path, self._inference_content['target_distribution']))
plot_error_hist(data, path=os.path.join(self.output_path, self._inference_content['error_hist']))
plot_reg_scatter(data, path=os.path.join(self.output_path, self._inference_content['scatter_plot']))
# metrics
mean_ae = mean_absolute_error(data['y_true'], data['y_pred'])
median_ae = median_absolute_error(data['y_true'], data['y_pred'])
mse = mean_squared_error(data['y_true'], data['y_pred'])
r2 = r2_score(data['y_true'], data['y_pred'])
evs = explained_variance_score(data['y_true'], data['y_pred'])
return mean_ae, median_ae, mse, r2, evs
def _multiclass_details(self, data):
y_true = data['y_true']
y_pred = data['y_pred']
# precision
p_micro = precision_score(y_true, y_pred, average='micro')
p_macro = precision_score(y_true, y_pred, average='macro')
p_weighted = precision_score(y_true, y_pred, average='weighted')
# recall
r_micro = recall_score(y_true, y_pred, average='micro')
r_macro = recall_score(y_true, y_pred, average='macro')
r_weighted = recall_score(y_true, y_pred, average='weighted')
# f1-score
f_micro = f1_score(y_true, y_pred, average='micro')
f_macro = f1_score(y_true, y_pred, average='macro')
f_weighted = f1_score(y_true, y_pred, average='weighted')
# classification report for features
classes = sorted(self.mapping, key=self.mapping.get)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred)
cls_report = pd.DataFrame({'Class name': classes, 'Precision': p, 'Recall': r, 'F1-score': f, 'Support': s})
self._inference_content['classification_report'] = cls_report.to_html(index=False, float_format='{:.4f}'.format,
justify='left')
plot_confusion_matrix(data, path=os.path.join(self.output_path, self._inference_content['confusion_matrix']))
return [p_micro, p_macro, p_weighted, r_micro, r_macro, r_weighted, f_micro, f_macro, f_weighted]
def _collect_data(self, preds, sample):
data = pd.DataFrame({'y_true': sample[self._target].values})
        if self.task == 'multiclass':
if self.mapping is not None:
data['y_true'] = np.array([self.mapping[y] for y in data['y_true'].values])
data['y_pred'] = preds._data.argmax(axis=1)
else:
data['y_pred'] = preds._data[:, 0]
data.sort_values('y_pred', ascending=False, inplace=True)
data['bin'] = (np.arange(data.shape[0]) / data.shape[0] * self.n_bins).astype(int)
# remove NaN in predictions:
data = data[~data['y_pred'].isnull()]
return data
def fit_predict(self, *args, **kwargs):
"""Wrapped ``automl.fit_predict`` method.
Valid args, kwargs are the same as wrapped automl.
Args:
*args: Arguments.
**kwargs: Additional parameters.
Returns:
OOF predictions.
"""
# TODO: parameters parsing in general case
preds = self._model.fit_predict(*args, **kwargs)
train_data = kwargs["train_data"] if "train_data" in kwargs else args[0]
input_roles = kwargs["roles"] if "roles" in kwargs else args[1]
self._target = input_roles['target']
valid_data = kwargs.get("valid_data", None)
if valid_data is None:
data = self._collect_data(preds, train_data)
else:
data = self._collect_data(preds, valid_data)
self._inference_content = {}
if self.task == 'binary':
# filling for html
self._inference_content = {}
self._inference_content['roc_curve'] = 'valid_roc_curve.png'
self._inference_content['pr_curve'] = 'valid_pr_curve.png'
self._inference_content['pie_f1_metric'] = 'valid_pie_f1_metric.png'
self._inference_content['preds_distribution_by_bins'] = 'valid_preds_distribution_by_bins.png'
self._inference_content['distribution_of_logits'] = 'valid_distribution_of_logits.png'
# graphics and metrics
_, self._F1_thresh = f1_score_w_co(data)
auc_score, prec, rec, F1 = self._binary_classification_details(data)
# update model section
evaluation_parameters = ['AUC-score', \
'Precision', \
'Recall', \
'F1-score']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [auc_score, prec, rec, F1]})
elif self.task == 'reg':
# filling for html
self._inference_content['target_distribution'] = 'valid_target_distribution.png'
self._inference_content['error_hist'] = 'valid_error_hist.png'
self._inference_content['scatter_plot'] = 'valid_scatter_plot.png'
# graphics and metrics
mean_ae, median_ae, mse, r2, evs = self._regression_details(data)
# model section
evaluation_parameters = ['Mean absolute error', \
'Median absolute error', \
'Mean squared error', \
'R^2 (coefficient of determination)', \
'Explained variance']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [mean_ae, median_ae, mse, r2, evs]})
elif self.task == 'multiclass':
self._inference_content['confusion_matrix'] = 'valid_confusion_matrix.png'
index_names = np.array([['Precision', 'Recall', 'F1-score'], \
['micro', 'macro', 'weighted']])
index = pd.MultiIndex.from_product(index_names, names=['Evaluation metric', 'Average'])
summary = self._multiclass_details(data)
self._model_summary = pd.DataFrame({'Validation sample': summary}, index=index)
self._inference_content['title'] = 'Results on validation sample'
self._generate_model_section()
# generate train data section
        self._train_data_overview = self._data_general_info(train_data)
self._describe_roles(train_data)
self._describe_dropped_features(train_data)
self._generate_train_set_section()
# generate fit_predict section
self._generate_inference_section(data)
self.generate_report()
return preds
def predict(self, *args, **kwargs):
"""Wrapped automl.predict method.
Valid args, kwargs are the same as wrapped automl.
Args:
*args: arguments.
**kwargs: additional parameters.
Returns:
predictions.
"""
self._n_test_sample += 1
# get predictions
test_preds = self._model.predict(*args, **kwargs)
test_data = kwargs["test"] if "test" in kwargs else args[0]
data = self._collect_data(test_preds, test_data)
if self.task == 'binary':
# filling for html
self._inference_content = {}
self._inference_content['roc_curve'] = 'test_roc_curve_{}.png'.format(self._n_test_sample)
self._inference_content['pr_curve'] = 'test_pr_curve_{}.png'.format(self._n_test_sample)
self._inference_content['pie_f1_metric'] = 'test_pie_f1_metric_{}.png'.format(self._n_test_sample)
self._inference_content['bins_preds'] = 'test_bins_preds_{}.png'.format(self._n_test_sample)
self._inference_content['preds_distribution_by_bins'] = 'test_preds_distribution_by_bins_{}.png'.format(
self._n_test_sample)
self._inference_content['distribution_of_logits'] = 'test_distribution_of_logits_{}.png'.format(self._n_test_sample)
# graphics and metrics
auc_score, prec, rec, F1 = self._binary_classification_details(data)
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = [auc_score, prec, rec, F1]
else:
self._model_summary['Test sample'] = [auc_score, prec, rec, F1]
elif self.task == 'reg':
# filling for html
self._inference_content = {}
self._inference_content['target_distribution'] = 'test_target_distribution_{}.png'.format(self._n_test_sample)
self._inference_content['error_hist'] = 'test_error_hist_{}.png'.format(self._n_test_sample)
self._inference_content['scatter_plot'] = 'test_scatter_plot_{}.png'.format(self._n_test_sample)
# graphics
mean_ae, median_ae, mse, r2, evs = self._regression_details(data)
# update model section
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = [mean_ae, median_ae, mse, r2, evs]
else:
self._model_summary['Test sample'] = [mean_ae, median_ae, mse, r2, evs]
elif self.task == 'multiclass':
self._inference_content['confusion_matrix'] = 'test_confusion_matrix_{}.png'.format(self._n_test_sample)
test_summary = self._multiclass_details(data)
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = test_summary
else:
self._model_summary['Test sample'] = test_summary
# layout depends on number of test samples
if self._n_test_sample >= 2:
self._inference_content['title'] = 'Results on test sample {}'.format(self._n_test_sample)
else:
self._inference_content['title'] = 'Results on test sample'
# update model section
self._generate_model_section()
# generate predict section
self._generate_inference_section(data)
self.generate_report()
return test_preds
    def _data_general_info(self, data):
general_info = pd.DataFrame(columns=['Parameter', 'Value'])
general_info.loc[0] = ('Number of records', data.shape[0])
general_info.loc[1] = ('Total number of features', data.shape[1])
general_info.loc[2] = ('Used features', len(self._model.reader._used_features))
general_info.loc[3] = ('Dropped features', len(self._model.reader._dropped_features))
# general_info.loc[4] = ('Number of positive cases', np.sum(data[self._target] == 1))
# general_info.loc[5] = ('Number of negative cases', np.sum(data[self._target] == 0))
return general_info.to_html(index=False, justify='left')
def _describe_roles(self, train_data):
# detect feature roles
roles = self._model.reader._roles
numerical_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Numeric']
categorical_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Category']
datetime_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Datetime']
# numerical roles
numerical_features_df = []
for feature_name in numerical_features:
item = {'Feature name': feature_name}
item['NaN ratio'] = "{:.4f}".format(train_data[feature_name].isna().sum() / train_data.shape[0])
values = train_data[feature_name].dropna().values
item['min'] = np.min(values)
item['quantile_25'] = np.quantile(values, 0.25)
item['average'] = np.mean(values)
item['median'] = np.median(values)
item['quantile_75'] = np.quantile(values, 0.75)
item['max'] = np.max(values)
numerical_features_df.append(item)
if numerical_features_df == []:
self._numerical_features_table = None
else:
self._numerical_features_table = | pd.DataFrame(numerical_features_df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import codecs
import io
import json
import logging
import os
import re
import tarfile
from collections import Counter
from operator import itemgetter
import numpy as np
import pandas as pd
import six
import tqdm
def strip_empty_strings(strings):
while strings and strings[-1] == "":
del strings[-1]
return strings
def _get_logger(log_level=logging.INFO):
result = logging.getLogger('newsqa')
if not result.handlers:
# Explicitly only set the log level if the logger hasn't been set up yet.
result.setLevel(log_level)
ch = logging.StreamHandler()
ch.setLevel(log_level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(filename)s::%(funcName)s\n%(message)s')
ch.setFormatter(formatter)
result.addHandler(ch)
return result
class NewsQaDataset(object):
def __init__(self, cnn_stories_path=None, dataset_path=None, log_level=logging.INFO,
combined_data_path=None):
self._logger = _get_logger(log_level)
if combined_data_path:
self.dataset = self.load_combined(combined_data_path)
self.version = self._get_version(combined_data_path)
return
if not six.PY2:
raise Exception("Sorry, the loading logic only works with Python 2 for now.")
dirname = os.path.dirname(os.path.abspath(__file__))
if cnn_stories_path is None:
cnn_stories_path = os.path.join(dirname, 'cnn_stories.tgz')
if not os.path.exists(cnn_stories_path):
raise Exception(
"`%s` was not found.\nFor legal reasons, you must first download the stories on "
"your own from http://cs.nyu.edu/~kcho/DMQA/" % cnn_stories_path)
if dataset_path is None:
dataset_path = os.path.join(dirname, 'newsqa-data-v1.csv')
if not os.path.exists(dataset_path):
zipped_dataset_paths = list(filter(os.path.exists,
[
os.path.join(os.path.dirname(dataset_path), 'newsqa-data-v1.tar.gz'),
os.path.join(os.path.dirname(dataset_path), 'newsqa.tar.gz'),
]))
if len(zipped_dataset_paths) > 0:
zipped_dataset_path = zipped_dataset_paths[0]
self._logger.info("Will use zipped dataset at `%s`.", zipped_dataset_path)
with tarfile.open(zipped_dataset_path, mode='r:gz', encoding='utf-8') as t:
extraction_destination_path = os.path.dirname(dataset_path)
self._logger.info("Extracting `%s` to `%s`.", zipped_dataset_path, extraction_destination_path)
t.extractall(path=extraction_destination_path)
else:
raise Exception(
"`%s` was not found.\nFor legal reasons, you must first accept the terms "
"and download the dataset from "
"https://msropendata.com/datasets/939b1042-6402-4697-9c15-7a28de7e1321"
"\n See the README in the root of this repo for more details." % dataset_path)
self.version = self._get_version(dataset_path)
self._logger.info("Loading dataset from `%s`...", dataset_path)
# It's not really combined but it's okay because the method still works
# to load data with missing columns.
self.dataset = self.load_combined(dataset_path)
remaining_story_ids = set(self.dataset['story_id'])
self._logger.info("Loading stories from `%s`...", cnn_stories_path)
with io.open(os.path.join(dirname, 'stories_requiring_extra_newline.csv'),
'r', encoding='utf-8') as f:
stories_requiring_extra_newline = set(f.read().split('\n'))
with io.open(os.path.join(dirname, 'stories_requiring_two_extra_newlines.csv'),
'r', encoding='utf-8') as f:
stories_requiring_two_extra_newlines = set(f.read().split('\n'))
with io.open(os.path.join(dirname, 'stories_to_decode_specially.csv'),
'r', encoding='utf-8') as f:
stories_to_decode_specially = set(f.read().split('\n'))
story_id_to_text = {}
with tarfile.open(cnn_stories_path, mode='r:gz', encoding='utf-8') as t:
highlight_indicator = '@highlight'
copyright_line_pattern = re.compile(
"^(Copyright|Entire contents of this article copyright, )")
with tqdm.tqdm(total=len(remaining_story_ids),
mininterval=2, unit_scale=True, unit=" stories",
desc="Getting story texts") as pbar:
for member in t.getmembers():
story_id = member.name
if story_id in remaining_story_ids:
remaining_story_ids.remove(story_id)
story_file = t.extractfile(member)
# Correct discrepancies in stories.
# Problems are caused by using several programming languages and libraries.
# When ingesting the stories, we started with Python 2.
# After dealing with unicode issues, we tried switching to Python 3.
# That caused inconsistency problems so we switched back to Python 2.
# Furthermore, when crowdsourcing, JavaScript and HTML templating perturbed
# the stories.
# So here we map the text to be compatible with the indices.
if story_id in stories_to_decode_specially:
lines = map(lambda s: u"".join(six.unichr(ord(c)) for c in s.strip()),
story_file.readlines())
else:
lines = map(lambda s: s.strip().decode('utf-8'),
story_file.readlines())
story_file.close()
if not six.PY2:
lines = list(lines)
highlights_start = lines.index(highlight_indicator)
story_lines = lines[:highlights_start]
story_lines = strip_empty_strings(story_lines)
while len(story_lines) > 1 and copyright_line_pattern.search(
story_lines[-1]):
story_lines = strip_empty_strings(story_lines[:-2])
if story_id in stories_requiring_two_extra_newlines:
story_text = '\n\n\n'.join(story_lines)
elif story_id in stories_requiring_extra_newline:
story_text = '\n\n'.join(story_lines)
else:
story_text = '\n'.join(story_lines)
story_text = story_text.replace(u'\xe2\x80\xa2', u'\xe2\u20ac\xa2')
story_text = story_text.replace(u'\xe2\x82\xac', u'\xe2\u201a\xac')
story_text = story_text.replace('\r', '\n')
if story_id in stories_to_decode_specially:
story_text = story_text.replace(u'\xe9', u'\xc3\xa9')
story_id_to_text[story_id] = story_text
pbar.update()
if len(remaining_story_ids) == 0:
break
for row in tqdm.tqdm(self.dataset.itertuples(),
total=len(self.dataset),
mininterval=2, unit_scale=True, unit=" questions",
desc="Setting story texts"):
# Set story_text since we cannot include it in the dataset.
story_text = story_id_to_text[row.story_id]
self.dataset.at[row.Index, 'story_text'] = story_text
# Handle endings that are too large.
answer_char_ranges = row.answer_char_ranges.split('|')
updated_answer_char_ranges = []
ranges_updated = False
for user_answer_char_ranges in answer_char_ranges:
updated_user_answer_char_ranges = []
for char_range in user_answer_char_ranges.split(','):
if char_range != 'None':
start, end = map(int, char_range.split(':'))
if end > len(story_text):
ranges_updated = True
end = len(story_text)
if start < end:
updated_user_answer_char_ranges.append('%d:%d' % (start, end))
else:
# It's unclear why but sometimes the end is after the start.
# We'll filter these out.
ranges_updated = True
else:
updated_user_answer_char_ranges.append(char_range)
if updated_user_answer_char_ranges:
updated_user_answer_char_ranges = ','.join(updated_user_answer_char_ranges)
updated_answer_char_ranges.append(updated_user_answer_char_ranges)
if ranges_updated:
updated_answer_char_ranges = '|'.join(updated_answer_char_ranges)
self.dataset.at[row.Index, 'answer_char_ranges'] = updated_answer_char_ranges
if row.validated_answers and not pd.isnull(row.validated_answers):
updated_validated_answers = {}
validated_answers = json.loads(row.validated_answers)
for char_range, count in six.iteritems(validated_answers):
if ':' in char_range:
start, end = map(int, char_range.split(':'))
if end > len(story_text):
ranges_updated = True
end = len(story_text)
if start < end:
char_range = '{}:{}'.format(start, end)
updated_validated_answers[char_range] = count
else:
# It's unclear why but sometimes the end is after the start.
# We'll filter these out.
ranges_updated = True
else:
updated_validated_answers[char_range] = count
if ranges_updated:
updated_validated_answers = json.dumps(updated_validated_answers,
ensure_ascii=False, separators=(',', ':'))
self.dataset.at[row.Index, 'validated_answers'] = updated_validated_answers
self._logger.info("Done loading dataset.")
@staticmethod
def load_combined(path):
"""
:param path: The path of data to load.
:return: A `DataFrame` containing the data from `path`.
:rtype: pandas.DataFrame
"""
logger = _get_logger()
logger.info("Loading data from `%s`...", path)
result = pd.read_csv(path,
encoding='utf-8',
dtype=dict(is_answer_absent=float),
na_values=dict(question=[], story_text=[], validated_answers=[]),
keep_default_na=False)
if 'story_text' in result.keys():
for row in tqdm.tqdm(result.itertuples(),
total=len(result),
mininterval=2, unit_scale=True, unit=" questions",
desc="Adjusting story texts"):
# Correct story_text to make indices work right.
story_text = row.story_text.replace('\r\n', '\n')
result.at[row.Index, 'story_text'] = story_text
return result
def _get_version(self, path):
        m = re.match(r'^.*-v(([\d.])*\d+)\.[^.]*$', path)
if not m:
raise ValueError("Version number not found in `{}`.".format(path))
return m.group(1)
def _map_answers(self, answers):
result = []
for a in answers.split('|'):
user_answers = []
result.append(dict(sourcerAnswers=user_answers))
for r in a.split(','):
if r == 'None':
user_answers.append(dict(noAnswer=True))
else:
s, e = map(int, r.split(':'))
user_answers.append(dict(s=s, e=e))
return result
def export_shareable(self, path, package_path=None):
"""
Export the dataset without the stories so that it can be shared.
:param path: The path to write the dataset to.
:param package_path: (Optional) If given, the path to write the tar.gz for the website.
"""
self._logger.info("Exporting dataset to %s", path)
columns = list(self.dataset.columns.values)
columns_to_remove = [
'story_title',
'story_text',
'popular_answer_char_ranges',
'popular_answers (for humans to read)',
]
for col in columns_to_remove:
try:
columns.remove(col)
            except ValueError:
pass
self.dataset.to_csv(path, columns=columns, index=False, encoding='utf-8')
if package_path:
dirname = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(dirname))
os.chdir(os.path.dirname(package_path))
with tarfile.open(os.path.basename(package_path), 'w|gz', encoding='utf-8') as t:
t.add(os.path.join(project_root, 'README-distribution.md'), arcname='README.md')
t.add(os.path.join(project_root, 'LICENSE.txt'), arcname='LICENSE.txt')
t.add(path, arcname=os.path.basename(path))
def dump(self, path):
"""
Export the combined dataset, with stories, to a file.
:param path: The path to write the dataset to.
"""
self._logger.info("Packaging dataset to `%s`.", path)
if path.endswith('.json'):
data = self.to_dict()
# Most reliable way to write UTF-8 JSON as described: https://stackoverflow.com/a/18337754/1226799
data = json.dumps(data, ensure_ascii=False, separators=(',', ':'), encoding='utf-8')
with io.open(path, 'w', encoding='utf-8') as f:
f.write(unicode(data))
else:
if not path.endswith('.csv'):
self._logger.warning("Writing data as CSV to `%s`.", path)
# Default for backwards compatibility.
self.dataset.to_csv(path, index=False, encoding='utf-8')
def get_vocab_len(self):
"""
:return: Approximate vocabulary size.
"""
vocab = set()
for _, row in tqdm.tqdm(self.dataset.iterrows(),
total=len(self.dataset),
mininterval=2, unit_scale=True, unit=" questions",
desc="Gathering vocab"):
vocab.update(row['story_text'].lower().split())
print("Vocabulary length: %s" % len(vocab))
return len(vocab)
def get_answers(self, include_no_answers=False):
answers = []
for row in tqdm.tqdm(self.dataset.itertuples(),
total=len(self.dataset),
mininterval=2, unit_scale=True, unit=" questions",
desc="Gathering answers"):
# Prefer validated answers.
# If there are no validated answers, use the ones that are provided.
if not row.validated_answers or pd.isnull(row.validated_answers):
# Ignore per selection splits.
char_ranges = row.answer_char_ranges.replace('|', ',').split(',')
else:
validated_answers_dict = json.loads(row.validated_answers)
char_ranges = []
for k, v in validated_answers_dict.items():
char_ranges += v * [k]
for char_range in char_ranges:
if include_no_answers and char_range.lower() == 'none':
answers.append(None)
elif ':' in char_range:
start, end = map(int, char_range.split(':'))
answer = row.story_text[start:end]
answers.append(answer)
return pd.Series(answers)
def get_answer_lengths_words(self, max_length=-1):
def get_word_count(answer):
return len(answer.split())
lengths = self.get_answers().apply(get_word_count)
if max_length >= 0:
lengths = lengths[lengths <= max_length]
return lengths
def get_questions_and_answers(self, include_no_answers=False):
qa_map = {}
def add_q_a_pair_to_map(q, a):
q = q.lower().strip().strip('.?!').strip()
if not q:
q = "no_question"
if q in qa_map:
qa_map[q].append(a)
else:
qa_map[q] = [a]
for row in tqdm.tqdm(self.dataset.itertuples(),
total=len(self.dataset),
mininterval=2, unit_scale=True, unit=" questions",
desc="Gathering answers"):
# Prefer validated answers.
# If there are no validated answers, use the ones that are provided.
if not row.validated_answers or pd.isnull(row.validated_answers):
char_ranges = row.answer_char_ranges.replace('|', ',').split(',')
else:
validated_answers_dict = json.loads(row.validated_answers)
char_ranges = []
for k, v in validated_answers_dict.items():
char_ranges += v * [k]
for char_range in char_ranges:
if include_no_answers and char_range.lower() == 'none':
add_q_a_pair_to_map(row.question, "")
elif ':' in char_range:
start, end = map(int, char_range.split(':'))
answer = row.story_text[start:end]
add_q_a_pair_to_map(row.question, answer)
return pd.DataFrame(data=list(qa_map.items()), columns=['question', 'answers'])
def get_average_answer_length_over_questions(self):
def get_word_count(answer):
return len(answer.split())
qas = self.get_questions_and_answers()
avg_ans_lengths = np.zeros(len(qas))
for index, row in qas.iterrows():
avg_ans_lengths[index] = np.average([get_word_count(answer)
for answer in row['answers']])
return | pd.Series(avg_ans_lengths) | pandas.Series |
import pandas as pd
from abc import abstractmethod
from datetime import datetime, timedelta as td
from ..basicData.basicData import BasicData
class OptionBase2:
    # %% Initialization
all_trade_dates = BasicData.ALL_TRADE_DATES
price_dict = BasicData.PRICE_DICT
def __init__(self):
self.reset_paras()
self.greek_columns = ['sigma', 'left_days', 'left_times', 'sigma_T', 'stock_price', 'd1', 'nd1', 'Nd1', 'Nd2',
'delta', 'gamma', 'vega', 'theta', 'option_price', 'cash_delta', 'cash_gamma',
'cash_theta', 'option_value']
def reset_paras(self):
self.notional = None
self.stock_code = None
self.start_date = None
self.end_date = None
self.look_back_date = None
self.K = None
self.r = 0.04
self.option_fee = None
self.trade_dates = None
self.trade_datetimes = None
def set_paras(self, notional=None, start_date=None, end_date=None, K=None, r=None, option_fee=None, stock_code=None,
start_price=None):
self.set_notional(notional)
self.set_start_date(start_date)
self.set_end_date(end_date)
self.set_K(K)
self.set_r(r)
self.set_option_fee(option_fee)
self.set_stock_code(stock_code)
self.set_start_price(start_price)
def set_paras_by_dict(self, para_dict):
self.set_notional(para_dict.get('notional'))
self.set_start_date(para_dict.get('start_date'))
self.set_end_date(para_dict.get('end_date'))
self.set_K(para_dict.get('K'))
self.set_r(para_dict.get('r'))
self.set_option_fee(para_dict.get('option_fee'))
self.set_stock_code(para_dict.get('stock_code'))
self.set_start_price(para_dict.get('start_price'))
def set_notional(self, notional=None):
if notional is not None:
self.notional = notional
def set_start_price(self, start_price=None):
if start_price is not None:
self.start_price = start_price
def set_start_date(self, start_date=None):
if start_date is not None:
self.start_date = start_date
if self.end_date is not None:
self.calculate_trade_dates()
def set_end_date(self, end_date=None):
if end_date is not None:
self.end_date = end_date
if self.start_date is not None:
self.calculate_trade_dates()
def set_K(self, K=None):
if K is not None:
self.K = K
def set_r(self, r=None):
if r is not None:
self.r = r
def set_option_fee(self, option_fee=None):
if option_fee is not None:
self.option_fee = option_fee
def set_stock_code(self, stock_code=None):
if stock_code is not None:
self.stock_code = stock_code
def calculate_trade_dates(self):
start_idx = self.all_trade_dates.index(self.start_date)
end_idx = self.all_trade_dates.index(self.end_date) + 1
self.trade_dates = self.all_trade_dates[start_idx:end_idx]
self.look_back_date = self.all_trade_dates[start_idx - 60]
self.look_back_dates = self.all_trade_dates[start_idx - 60:end_idx]
self.trade_datetimes = sorted(
[datetime(x.year, x.month, x.day, 9, 30) for x in self.trade_dates] + [datetime(x.year, x.month, x.day, 15)
for x in self.trade_dates])
self.look_back_datetimes = sorted([datetime(x.year, x.month, x.day, 9, 30) for x in self.look_back_dates] + [
datetime(x.year, x.month, x.day, 15) for x in self.look_back_dates])
self.datetime_length = len(self.trade_datetimes)
@abstractmethod
def calculate_greeks(self):
pass
def get_stock_prices(self):
if self.stock_code is None:
            print('Stock code is not set')
return -1
open_price = self.price_dict['open'].loc[self.look_back_dates, self.stock_code]
close_price = self.price_dict['close'].loc[self.look_back_dates, self.stock_code]
open_price.index = open_price.index + td(hours=9, minutes=30)
close_price.index = close_price.index + td(hours=15)
self.stock_prices = | pd.concat([open_price, close_price], axis=0) | pandas.concat |
import os
import pandas as pd
import numpy as np
import torch
import plot_data
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
path = "/home/luo00042/M2SSD/SAMCNet/"
def get_sampler(sample_dataset, label_name, labels, transformed_samples=5):
class_sample_count = [len([x[0] for x in list(sample_dataset.groupby([label_name, 'Sample'])) if x[0][0] == labels[i]]) * transformed_samples for i in range(len(labels))]
sample_labels = np.asarray([[x[1][label_name].cat.codes.iloc[0]] * transformed_samples for x in list(sample_dataset.groupby(['Sample']))]).ravel()
num_samples = sum(class_sample_count)
class_weights = [num_samples/class_sample_count[i] for i in range(len(class_sample_count))]
weights = [class_weights[sample_labels[i]] for i in range(int(num_samples))]
sampler = torch.utils.data.sampler.WeightedRandomSampler(torch.DoubleTensor(weights), len(weights))
return sampler
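# A minimal illustrative sketch of the weighting arithmetic used above: every
# sample gets a weight proportional to 1 / (its class frequency), so the
# WeightedRandomSampler oversamples the minority class. All numbers are made up.
def _sketch_class_weighting():
    class_sample_count = [80, 20]                                  # 80 class-0, 20 class-1 samples
    num_samples = sum(class_sample_count)                          # 100
    class_weights = [num_samples / c for c in class_sample_count]  # [1.25, 5.0]
    sample_labels = [0] * 80 + [1] * 20
    weights = [class_weights[lbl] for lbl in sample_labels]
    return torch.utils.data.sampler.WeightedRandomSampler(
        torch.DoubleTensor(weights), len(weights))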
def read_tsv(in_file, use_margins=False, set_categories=True, sep='\t'):
df = pd.read_csv(in_file, sep=sep, header=0, low_memory=False)
#df = df[(df.Phenotype != 'Potential Artifact') & (df.Phenotype != 'Unclassified') & (df.Phenotype != 'Monocyte') & (df.Phenotype != 'Neutrophyl') & (df.Phenotype != 'Plasma Cell')]
if use_margins:
df = filter_margins(df)
if set_categories:
# Set necessary columns as categorical to later retrieve distinct category codes
df.Sample = pd.Categorical(df.Sample)
df.Pathology = pd.Categorical(df.Pathology)
df.Phenotype = pd.Categorical(df.Phenotype)
df.Status = pd.Categorical(df.Status)
df.HLA1_FUNCTIONAL_threeclass = pd.Categorical(df.HLA1_FUNCTIONAL_threeclass)
return df
def read_margin_samples(in_dir):
mixed_dfs = []
for filename in os.listdir(in_dir + '/' + 'mixed'):
filepath = in_dir + '/' + 'mixed' + '/' + filename
df = read_tsv(filepath, set_categories=False, sep=',')
df['Sample'] = filename
mixed_dfs.append(df)
mixed_df = pd.concat(mixed_dfs)
mixed_df['Margin'] = 'Mixed'
intact_dfs = []
for filename in os.listdir(in_dir + '/' + 'intact'):
filepath = in_dir + '/' + 'intact' + '/' + filename
df = read_tsv(filepath, set_categories=False, sep=',')
df['Sample'] = filename
intact_dfs.append(df)
intact_df = pd.concat(intact_dfs)
intact_df['Margin'] = 'Intact'
df = pd.concat([mixed_df, intact_df])
# Set necessary columns as categorical to later retrieve distinct category codes
df.Sample = pd.Categorical(df.Sample)
df.Pathology = pd.Categorical(df.Pathology)
df['Phenotype'] = | pd.Categorical(df.ManualPhenotype) | pandas.Categorical |
# -*- coding: utf-8 -*-
"""
Daily index price data
"""
from stockquant.settings import CQ_Config
from stockquant.util.stringhelper import TaskEnum
from stockquant.odl.models import TS_Index_Basic, TS_Index_Daily
from stockquant.util.database import get_new_session, session_scope, engine
from stockquant.util.models import TaskTable
from datetime import datetime
from stockquant.util import logger
from tqdm import tqdm
import tushare as ts
import time
import pandas as pd
_logger = logger.Logger(__name__).get_log()
task = TaskEnum.TS指数日线行情
def update_task():
"""
    Update the task table
"""
global task
TaskTable.del_with_task(task)
with session_scope() as sm:
query = sm.query(TS_Index_Basic.ts_code, TS_Index_Basic.list_date)
query = query.filter(TS_Index_Basic.market != "SW")
codes = query.all()
tasklist = []
for c in codes:
tasktable = TaskTable(
task=task.value,
task_name=task.name,
ts_code=c.ts_code,
bs_code="NA",
begin_date=c.list_date if c.list_date else datetime(1990, 12, 19).date(),
end_date=datetime.now().date(),
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
def _load_data(content: pd.DataFrame, ts_code):
"""
    Load the data into the database after some simple transformations
"""
table_name = TS_Index_Daily.__tablename__
if content.empty:
return
try:
content["trade_date"] = | pd.to_datetime(content["trade_date"]) | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import shapefile
def add_maritimes_region(m):
sf = shapefile.Reader("shapefiles/MaritimesRegionPolygon_UpdatedSept2015_wgs84")
for shape in list(sf.iterShapes()):
npoints=len(shape.points) # total points
nparts = len(shape.parts) # total parts
if nparts == 1:
poly_lons = np.zeros((len(shape.points),1))
poly_lats = np.zeros((len(shape.points),1))
for ip in range(len(shape.points)):
poly_lons[ip] = shape.points[ip][0]
poly_lats[ip] = shape.points[ip][1]
plot_polygon(poly_lons, poly_lats)
else: # loop over parts of each shape, plot separately
for ip in range(nparts): # loop over parts, plot separately
i0=shape.parts[ip]
if ip < nparts-1:
i1 = shape.parts[ip+1]-1
else:
i1 = npoints
seg=shape.points[i0:i1+1]
poly_lons = np.zeros((len(seg),1))
poly_lats = np.zeros((len(seg),1))
for ip in range(len(seg)):
poly_lons[ip] = seg[ip][0]
poly_lats[ip] = seg[ip][1]
plot_polygon(poly_lons, poly_lats,m, edgecolor='#ff0000',linewidth=1.0,alpha=0.6,zorder=40)
return
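# A minimal, library-free sketch of the part-splitting logic used above:
# shape.parts stores the index at which each ring starts inside shape.points,
# so consecutive entries delimit the rings. The coordinates below are made up.
def _sketch_split_shape_parts():
    points = [(0, 0), (1, 0), (1, 1), (5, 5), (6, 5), (6, 6), (5, 6)]
    parts = [0, 3]                       # ring 0 starts at index 0, ring 1 at index 3
    npoints, nparts = len(points), len(parts)
    rings = []
    for ip in range(nparts):
        i0 = parts[ip]
        i1 = parts[ip + 1] - 1 if ip < nparts - 1 else npoints
        rings.append(points[i0:i1 + 1])
    return rings                         # [[(0, 0), (1, 0), (1, 1)], [(5, 5), (6, 5), (6, 6), (5, 6)]]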
def plot_polygon(poly_lons, poly_lats, m, edgecolor='#a6a6a6',linewidth=0.5,alpha=0.3,zorder=2):
poly_x, poly_y = m(poly_lons, poly_lats)
poly_xy = np.transpose(np.array((poly_x[:,0], poly_y[:,0])))
    # Add the polygon patch
poly = Polygon(poly_xy,
closed=True,
edgecolor=edgecolor,
linewidth=linewidth,
alpha=alpha,
fill=False,
zorder=zorder)
plt.gca().add_patch(poly)
return
def add_NAFO_areas(m):
sf = shapefile.Reader("shapefiles/NAFO_SubUnits_CanAtlantic")
for shape in list(sf.iterShapes()):
npoints=len(shape.points) # total points
nparts = len(shape.parts) # total parts
if nparts == 1:
poly_lons = np.zeros((len(shape.points),1))
poly_lats = np.zeros((len(shape.points),1))
for ip in range(len(shape.points)):
poly_lons[ip] = shape.points[ip][0]
poly_lats[ip] = shape.points[ip][1]
plot_polygon(poly_lons, poly_lats,m, edgecolor='#a6a6a6',linewidth=0.5,alpha=0.5,zorder=20)
else: # loop over parts of each shape, plot separately
for ip in range(nparts): # loop over parts, plot separately
i0=shape.parts[ip]
if ip < nparts-1:
i1 = shape.parts[ip+1]-1
else:
i1 = npoints
seg=shape.points[i0:i1+1]
poly_lons = np.zeros((len(seg),1))
poly_lats = np.zeros((len(seg),1))
for ip in range(len(seg)):
poly_lons[ip] = seg[ip][0]
poly_lats[ip] = seg[ip][1]
plot_polygon(poly_lons, poly_lats, m, edgecolor='#a6a6a6',linewidth=0.5,alpha=0.5,zorder=20)
# NAFO labels
nafo = pd.read_csv('NAFO_subunit_centroids.csv')
zones = pd.unique(nafo['UnitArea'].values)
for zone in zones:
zone_points = nafo[nafo['UnitArea'] == zone]
lat = zone_points['ddlat'].values[0]
lon = zone_points['ddlong'].values[0]
if lat > 39.9 and lat < 48.3 and lon > -69 and lon < -54.7:
plot_label(lon, lat, zone, m)
return
def plot_label(lon, lat, zone, m):
x, y = m(lon, lat)
plt.text(x, y, zone, fontsize=9,color='#a6a6a6',zorder=35)
return
def plot_CriticalHabitats(m):
nafo = pd.read_csv('NorthAtlanticRightWhale_CH_coords.csv')
zones = | pd.unique(nafo['Polygon_ID'].values) | pandas.unique |
#!/usr/bin/env python
######################################################################################
# DCCN equalized receiver in tensorflow
# Author: <NAME>
# Date: 2021-03-10
# Link: https://github.com/zhongyuanzhao/dl_ofdm
# Cite this work:
# <NAME>, <NAME>, <NAME>, and <NAME>, "Deep-Waveform:
# A Learned OFDM Receiver Based on Deep Complex-valued Convolutional Networks,"
# EESS.SP, vol abs/1810.07181, Mar. 2021, [Online] https://arxiv.org/abs/1810.07181
#
# Copyright (c) 2021: <NAME>
# Houston, Texas, United States
# <<EMAIL>>
######################################################################################
import tensorflow as tf
import numpy as np
import pandas as pd
import scipy.io as sio
import os
import time
# from sklearn.preprocessing import OneHotEncoder
from model import *
from ofdm import *
from radio import *
from util import *
import copy
# these ones let us draw images in our notebook
flags = tf.app.flags
flags.DEFINE_string('save_dir', './output/', 'directory where model graph and weights are saved')
flags.DEFINE_integer('nbits', 1, 'bits per symbol')
flags.DEFINE_integer('msg_length', 100800, 'Message Length of Dataset')
flags.DEFINE_integer('batch_size', 512, '')
flags.DEFINE_integer('max_epoch_num', 5000, '')
flags.DEFINE_integer('seed', 1, 'random seed')
flags.DEFINE_integer('nfft', 64, 'FFT size (number of subcarriers)')
flags.DEFINE_integer('nsymbol', 7, 'Number of OFDM symbols per frame')
flags.DEFINE_integer('npilot', 8, 'Number of pilot subcarriers')
flags.DEFINE_integer('nguard', 8, 'Number of guard subcarriers')
flags.DEFINE_integer('nfilter', 80, 'Number of convolutional filters')
flags.DEFINE_float('SNR', 30.0, '')
flags.DEFINE_float('SNR2', 30.0, '')
flags.DEFINE_integer('early_stop', 400, 'Number of epochs for early stopping')
flags.DEFINE_boolean('ofdm', True, 'Whether to add the OFDM layer')
flags.DEFINE_string('pilot', 'lte', 'Pilot type: lte(default), block, comb, scattered')
flags.DEFINE_string('channel', 'EPA', 'AWGN or Rayleigh Channel: Flat, EPA, EVA, ETU')
flags.DEFINE_boolean('cp', True, 'Whether to include a cyclic prefix')
flags.DEFINE_boolean('longcp', True, 'Cyclic prefix length: True for 25%, False for 7%')
flags.DEFINE_boolean('load_model', True, 'Set True when running a test')
flags.DEFINE_float('split', 1.0, 'Split factor for the validation set; no split by default')
flags.DEFINE_string('token', 'OFDM','Name of model to be saved')
flags.DEFINE_integer('opt', 3, '0: default equalizer, 1: NoCConv, 2: NoResidual, 3: DNN')
flags.DEFINE_boolean('mobile', False, 'If Doppler spread is turned on')
flags.DEFINE_float('init_learning', 0.001, '')
flags.DEFINE_boolean('test',False,'Test trained model')
FLAGS = flags.FLAGS
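# A rough back-of-the-envelope sketch of how the flags above relate to the
# per-frame payload, assuming the usual layout of nfft subcarriers minus pilot,
# guard and a 2-carrier DC hole. The exact frame_size is computed inside the
# ofdm module, so treat these numbers only as a sanity check.
def _sketch_payload_estimate(nfft=64, npilot=8, nguard=8, dc=2, nbits=1, nsymbol=7):
    data_carriers = nfft - npilot - nguard - dc       # 46 with the defaults above
    bits_per_frame = data_carriers * nbits * nsymbol  # 322 with the defaults above
    return data_carriers, bits_per_frame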
def test_model_cross(FLAGS, path_prefix_min, ofdmobj, session):
y, x, iq_receiver, outputs, total_loss, ber, berlin, conf_matrix, power_tx, noise_pwr, iq_rx, iq_tx, ce_mean, SNR = load_model_np(path_prefix_min,session)
print("Final Test SNR: -10 to 30 dB")
nfft = FLAGS.nfft
nbits = FLAGS.nbits
npilot = FLAGS.npilot # last carrier as pilot
nguard = FLAGS.nguard
nsymbol = FLAGS.nsymbol
DC = 2
np.random.seed(int(time.time()))
frame_size = ofdmobj.frame_size
frame_cnt = 30000
for test_chan in ['ETU','EVA','EPA','Flat', 'Custom']:
df = pd.DataFrame(columns=['SNR', 'BER', 'Loss'])
flagcp = copy.deepcopy(FLAGS)
flagcp.channel = test_chan
# fading = rayleigh_chan_lte(flagcp, ofdmobj.Fs, mobile=FLAGS.mobile)
fading = RayleighChanParallel(flagcp, ofdmobj.Fs, mobile=FLAGS.mobile)
print("Test in %s, mobile: %s"%(test_chan, FLAGS.mobile))
for snr_t in range(-10, 31, 5):
np.random.seed(int(time.time()) + snr_t)
test_ys = bit_source(nbits, frame_size, frame_cnt)
# iq_tx_cmpx, test_xs, iq_pilot_tx = ofdmobj.ofdm_tx_np(test_ys)
iq_tx_cmpx, test_xs, iq_pilot_tx = ofdmobj.ofdm_tx_frame_np(test_ys)
test_xs, _ = fading.run(iq_tx_cmpx)
snr_test = snr_t * np.ones((frame_cnt, 1))
test_xs, pwr_noise_avg = AWGN_channel_np(test_xs, snr_test)
confmax, berl, pwr_tx, pwr_noise, test_loss, tx_sample, rx_sample = session.run([conf_matrix, berlin, power_tx, noise_pwr, ce_mean, iq_tx, iq_rx], {x: test_xs, y: test_ys, SNR:snr_test})
print("SNR: %.2f, BER: %.8f, Loss: %f"%(snr_t, berl, test_loss))
print("Test Confusion Matrix: ")
print(str(confmax))
df = df.append({'SNR': snr_t, 'BER': berl, 'Loss': test_loss}, ignore_index=True)
df = df.set_index('SNR')
# csvfile = 'Test_DCCN_%s_test_chan_%s.csv'%(FLAGS.token + '_Equalizer_' + FLAGS.channel, test_chan)
if FLAGS.mobile:
csvfile = 'Test_DCCN_%s_test_chan_%s_mobile.csv' % (FLAGS.token + '_Equalizer%d_' % (FLAGS.opt) + FLAGS.channel, test_chan)
else:
csvfile = 'Test_DCCN_%s_test_chan_%s.csv'%(FLAGS.token + '_Equalizer%d_'%(FLAGS.opt) + FLAGS.channel, test_chan)
df.to_csv(csvfile)
session.close()
def test_model(FLAGS, path_prefix_min,ofdmobj,session):
y, x, iq_receiver, outputs, total_loss, ber, berlin, conf_matrix, power_tx, noise_pwr, iq_rx, iq_tx, ce_mean, SNR = load_model_np(path_prefix_min,session)
print("Final Test SNR: -10 to 30 dB")
nfft = FLAGS.nfft
nbits = FLAGS.nbits
npilot = FLAGS.npilot # last carrier as pilot
nguard = FLAGS.nguard
nsymbol = FLAGS.nsymbol
DC = 2
frame_size = ofdmobj.frame_size
frame_cnt = 20000
df = pd.DataFrame(columns=['SNR','BER','Loss'])
fading = rayleigh_chan_lte(FLAGS, ofdmobj.Fs)
for snr_t in range(-10, 31):
np.random.seed(int(time.time()) + snr_t)
test_ys = bit_source(nbits, frame_size, frame_cnt)
# iq_tx_cmpx, test_xs, iq_pilot_tx = ofdmobj.ofdm_tx_np(test_ys)
iq_tx_cmpx, test_xs, iq_pilot_tx = ofdmobj.ofdm_tx_frame_np(test_ys)
test_xs, _ = fading.run(iq_tx_cmpx)
snr_test = snr_t * np.ones((frame_cnt, 1))
test_xs, pwr_noise_avg = AWGN_channel_np(test_xs, snr_test)
confmax, berl, pwr_tx, pwr_noise, test_loss, tx_sample, rx_sample = session.run([conf_matrix, berlin, power_tx, noise_pwr, ce_mean, iq_tx, iq_rx], {x: test_xs, y: test_ys, SNR:snr_test})
print("SNR: %.2f, BER: %.8f, Loss: %f"%(snr_t, berl, test_loss))
print("Test Confusion Matrix: ")
print(str(confmax))
df = df.append({'SNR': snr_t, 'BER': berl, 'Loss': test_loss}, ignore_index=True)
df = df.set_index('SNR')
csvfile = 'Test_DCCN_%s.csv'%(FLAGS.token + '_Equalizer_' + FLAGS.channel)
df.to_csv(csvfile)
session.close()
def test_model_mat(FLAGS, path_prefix_min,ofdmobj,session):
y, x, iq_receiver, outputs, total_loss, ber, berlin, conf_matrix, power_tx, noise_pwr, iq_rx, iq_tx, ce_mean, SNR = load_model_np(path_prefix_min,session)
print("Final Test SNR: -10 to 30 dB")
mod_names = ['BPSK','QPSK','8QAM','16QAM']
data_dir = '../m/mat'
nfft = ofdmobj.K
nbits = FLAGS.nbits
nsymbol = FLAGS.nsymbol
n_sc = ofdmobj.CP + ofdmobj.K
frame_size = ofdmobj.frame_size
frame_cnt = 20000
msg_length = frame_cnt*nsymbol
if FLAGS.longcp:
cpstr = ''
else:
cpstr = '_shortcp'
df = | pd.DataFrame(columns=['SNR','BER','Loss']) | pandas.DataFrame |
import pandas as pd
import os
import re
import matplotlib.pyplot as plt
def get_duplicates_of(df, name):
"""
Return the names of columns that correspond to duplicates: X, X -> X, X.1
:param df: pandas dataframe
:param name: original name (eg. X for X.1)
:return: list of strings
"""
regex = re.compile(r'^(__name__(?:\.\d+)?)$'.replace('__name__', name))
return [c for c in df.columns if bool(regex.match(c))]
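# A minimal illustrative sketch (made-up column names): the regex above matches
# the original name plus its pandas-style duplicate suffixes ('.1', '.2', ...),
# but not names that merely share the same prefix.
def _sketch_get_duplicates_of():
    frame = pd.DataFrame(columns=['MW', 'MW.1', 'MW.2', 'MWX', 'AW'])
    return get_duplicates_of(frame, 'MW')   # -> ['MW', 'MW.1', 'MW.2']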
def read_station(station_file):
"""
Read the station name corresponding to the USAF code in the station file
:param station_file:
:return: dict
"""
if os.path.exists(station_file):
with open(station_file, 'r') as f_station:
content = f_station.readlines()
# content: USAF, WBAN, STATION_NAME, COUNTRY, ...
header = content[0]
station_start = header.find('STATION NAME')
country_start = header.find('COUNTRY')
# read content (note: skip delimiter line)
stations = {}
for line in content[2:] :
usaf = line.split()[0].strip()
name = line[station_start: country_start-1].strip()
stations[usaf] = name
return stations
else:
# could not find the station file
return None
def read_data(data_file, station_file=None, light_output=True):
"""
Read a meteo dataset
    :param data_file: path of the data file to read
    :param station_file: optional station file mapping USAF codes to station names
    :param light_output: currently unused
    :return: pandas dataframe
"""
# read file
df = pd.read_csv(data_file,
delim_whitespace=True,
na_values=['*', '**', '***', '****', '*****'],
dtype={'YR--MODAHRMN': str, 'USAF': str, 'SKC': str})
# use date as index
date = pd.to_datetime(df['YR--MODAHRMN'],
format='%Y%m%d%H%M')
df.pop('YR--MODAHRMN')
df.set_index(date, inplace=True)
# direction: NaN values are given as 990
    df.loc[df['DIR'] > 370, 'DIR'] = float('nan')
# station : station names if station_file is provided, USAF code otherwise
if station_file:
stations = read_station(station_file)
if stations:
# util function
def usaf_to_station(usaf):
if usaf in stations:
return stations[usaf]
else:
return usaf
# apply util function to modify 'station'
df['station'] = df['USAF'].apply(usaf_to_station).astype('category')
return df
def select_data(df, basic=False, cloud=False,
precipitation=False, observation=False):
"""
Return a profile of meteo data based on output options.
Notice that if all output options are deactivated (False), the original
dataframe is returned.
:param df: input pandas dataframe
:param basic: output contains DIR, SPD, TEMP, DEWP
:param cloud: output contains CLG, SKC, L, M, H
:param precipitation: output contains PCP01, PCP06, PCP24, PCPXX
:param observation: output contains W, and MW & AW with renamed duplic.
:return: dataframe with selected columns
"""
if sum([basic, cloud, precipitation, observation]) == 0:
# default: return original dataframe
return df
else:
# create list of columns in the output
list_output = ['station', 'WBAN', 'USAF']
if basic:
list_output += ['DIR', 'SPD', 'TEMP', 'DEWP']
if cloud:
list_output += ['CLG', 'SKC', 'L', 'M', 'H']
if precipitation:
list_output += ['PCP01', 'PCP06', 'PCP24', 'PCPXX']
if observation:
list_output.append('W')
list_output += get_duplicates_of(df, 'MW')
list_output += get_duplicates_of(df, 'AW')
# clean list: keep only names of actual columns
list_clean = [s for s in list_output if s in df.columns]
# create output
return df[list_clean]
def convert(serie, conversion):
"""
Convert physical quantities from units to others
:param serie: input pandas serie
:param conversion: conversion string
:return: pandas serie
"""
if conversion == 'fahrenheit_to_celsius':
return (5./9.)*(serie-32)
if conversion == 'fahrenheit_to_kelvin':
return (5./9.)*(serie+459.67)
if conversion == 'celsius_to_kelvin':
return serie+273.15
if conversion == 'kelvin_to_celsius':
return serie-273.15
if conversion == 'inch_to_m':
return serie*0.0254
if conversion == 'mile_to_m':
return serie*1609.344
if conversion == 'mile/h_to_m/s':
return serie*0.44704
if conversion == 'km/h_to_m/s':
return serie*0.2777778
if conversion == 'mbar_to_N.m2':
return serie*100.
else:
print('Unknown conversion')
return serie*0.
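# A minimal usage sketch with made-up sample values: convert() is element-wise,
# so it works on any pandas Series.
def _sketch_convert():
    temp_c = convert(pd.Series([32.0, 68.0, 98.6]), 'fahrenheit_to_celsius')  # 0.0, 20.0, 37.0
    speed_ms = convert(pd.Series([10.0]), 'mile/h_to_m/s')                    # ~4.47 m/s
    return temp_c, speed_ms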
def select_station(df, name):
"""
Extract the data corresponding to a station
:param df: dataframe
:param name: name of the station, or USAF code if df has no column 'station'
:return: sub-dataframe for the station
"""
# get stations
if 'station' in df.columns:
stations = list_stations(df)
if name in stations:
return df[df['station'] == name]
else:
            raise ValueError('Dataframe has no station named "'+name+'"')
else :
usaf = list_stations(df)
if name in usaf:
return df[df['USAF'] == name]
else:
raise ValueError('Dataframe has no station with USAF "'+name+'"')
def list_stations(df):
"""
Return the list of stations, or USAF codes, for the dataframe
:param df: input dataframe
:return:
"""
if 'station' in df.columns:
return list(df['station'].unique())
else:
return list(df['USAF'].unique())
def select_time(df, first_day, last_day=None, years=None, months=None,
weeks=None, days=None):
"""
Extract a period of time. Notice that the input dataframe must not contain
more than one station.
:param df: input dataframe
:param first_day: date of the first day in YYYY/MM/DD format
:param last_day: date of the last day in YYYY/MM/DD format (optional)
:param years: number of years (optional)
:param months: number of months (optional)
:param weeks: number of weeks (optional)
:param days: number of days (optional)
:return: dataframe
"""
date_format = '%Y/%m/%d'
first_datetime = pd.to_datetime(first_day, format=date_format)
if last_day:
# mode 1: time defined by first and last day
last_datetime = pd.to_datetime(last_day, format=date_format)
return df[first_datetime:last_datetime]
else:
# mode 2: time defined by first day and period
last_datetime = first_datetime
# handle errors
if not any([days, weeks, months, years]):
error = 'Give at least one of: "days", "weeks", "months" or "years"'
raise ValueError(error)
# increment last_datetime in that order: year, month, week, day
if years:
for year_index in range(years):
if last_datetime.year % 4 == 0:
last_datetime += pd.to_timedelta(366, unit='d')
else:
last_datetime += pd.to_timedelta(365, unit='d')
if months:
for month_index in range(months):
days_in_month = last_datetime.days_in_month
                last_datetime += pd.to_timedelta(days_in_month, unit='d')
if weeks:
last_datetime += pd.to_timedelta(weeks, unit='w')
if days:
last_datetime += pd.to_timedelta(days, unit='d')
# result
df_sliced = df[first_datetime:last_datetime]
if not last_day:
# ensure last_datetime is excluded from output
if df_sliced.index[-1] == last_datetime:
df_sliced = df_sliced.iloc[:-1]
return df_sliced
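# Usage sketch (illustrative only; 'df_station' is assumed to be a single-station
# dataframe indexed by datetime, as required above):
#
#     select_time(df_station, '2015/01/01', last_day='2015/01/31')   # explicit range
#     select_time(df_station, '2015/01/01', weeks=2)                 # first day + period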
def convert_to_si(df):
"""
Convert all fields to SI units
:param df: input dataframe
:return: dataframe
"""
df_out = df.copy()
columns = df.columns
if 'SPD' in columns:
df_out['SPD'] = convert(df_out['SPD'], 'mile/h_to_m/s')
if 'GUS' in columns:
df_out['GUS'] = convert(df_out['GUS'], 'mile/h_to_m/s')
if 'VSB' in columns:
df_out['VSB'] = convert(df_out['VSB'], 'mile_to_m')
if 'TEMP' in columns:
df_out['TEMP'] = convert(df_out['TEMP'], 'fahrenheit_to_celsius')
if 'DEWP' in columns:
df_out['DEWP'] = convert(df_out['DEWP'], 'fahrenheit_to_celsius')
if 'SLP' in columns:
df_out['SLP'] = convert(df_out['SLP'], 'mbar_to_N.m2')
if 'ALT' in columns:
df_out['ALT'] = convert(df_out['ALT'], 'inch_to_m')
if 'STP' in columns:
df_out['STP'] = convert(df_out['STP'], 'mbar_to_N.m2')
if 'MAX' in columns:
df_out['MAX'] = convert(df_out['MAX'], 'fahrenheit_to_celsius')
if 'MIN' in columns:
df_out['MIN'] = convert(df_out['MIN'], 'fahrenheit_to_celsius')
if 'PCP01' in columns:
df_out['PCP01'] = convert(df_out['PCP01'], 'inch_to_m')
if 'PCP06' in columns:
df_out['PCP06'] = convert(df_out['PCP06'], 'inch_to_m')
if 'PCP24' in columns:
df_out['PCP24'] = convert(df_out['PCP24'], 'inch_to_m')
if 'PCPXX' in columns:
df_out['PCPXX'] = convert(df_out['PCPXX'], 'inch_to_m')
if 'SD' in columns:
df_out['SD'] = convert(df_out['SD'], 'inch_to_m')
return df_out
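# Usage sketch (illustrative only): convert every recognised column of a station
# dataframe to SI units in one call.
#
#     df_si = convert_to_si(df_station)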
def polar_stat(df, column, values=None, bounds=False):
"""
Compute the distribution of 'column' across wind directions and ranges of values
:param df: input dataframe
:param column: target of the statistics
:param values: values defining the intervals of analysis (optional)
:param bounds: add the open-ended bounds, i.e. intervals below the first value and above the last value (boolean)
:return: dataframe of percentage for each direction and range of values
"""
# handle error
if column not in df.columns:
print('Column '+column+' not found in dataframe')
return None
# number of valid values in the direction column (DIR)
nb_valid = df['DIR'].notnull().sum()
# create output dataframe
df_polar = pd.DataFrame(index=sorted(df['DIR'].dropna().unique()))
df_polar.index.name = 'DIR'
if not values:
# compute stats for all values
series = df.groupby('DIR')[column].count()/nb_valid
df_polar[column] = series*100
else:
# compute stats for lower bound: < first value
if bounds:
name = column + ' < ' + str(values[0])
selection = (df[column] < values[0])
series = df.loc[selection].groupby('DIR')[column].count()/nb_valid
df_polar[name] = series*100
for i in range(len(values)-1):
# compute stats for range [values[i], values[i+1][
name = str(values[i]) + ' <= ' + column + ' < ' + str(values[i+1])
selection = (df[column] >= values[i]) & (df[column] < values[i+1])
series = df.loc[selection].groupby('DIR')[column].count()/nb_valid
df_polar[name] = series*100
# compute stats for upper bound : > last value
if bounds:
name = str(values[-1]) + ' <= ' + column
selection = (df[column] >= values[-1])
series = df.loc[selection].groupby('DIR')[column].count()/nb_valid
df_polar[name] = series*100
return df_polar
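# Usage sketch (illustrative only): share of observations per wind direction, with
# the wind speed 'SPD' split into [0, 5) and [5, 10) m/s plus the open-ended bounds.
#
#     df_polar = polar_stat(df_station, 'SPD', values=[0, 5, 10], bounds=True)
#     polar_plot(df_polar, close=True)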
def polar_plot(df_polar, close=False, output=None):
"""
Create a polar plot. The plot is saved as a figure if the name of an output
file is given ("output"), or displayed.
:param df_polar: input dataframe from the polar_stats function
:param close: close the polar plot (boolean)
:param output: name of the output
:return:
"""
# check input
if len(df_polar.columns) == 0:
raise ValueError('Empty polar dataframe')
if close:
# add a line to a copy, to close the directions
df_plot = pd.concat([ df_polar, df_polar.iloc[[0]] ])
else:
df_plot = df_polar
# directions in radians (for polar function)
direction_rad = pd.np.radians(df_plot.index)
# coding: utf-8
# ## Lending Club - classification of loans
#
# This project aims to analyze data for loans issued through 2007-2015 from Lending Club, available on Kaggle. The dataset contains over 887 thousand observations and 74 variables, one of which describes the loan status. The goal is to create a machine learning model to categorize the loans as good or bad.
#
# Contents:
#
# 1. Preparing dataset for preprocessing
# 2. Reviewing variables - drop and edit
# 3. Missing values
# 4. Preparing dataset for modeling
# 5. Undersampling approach
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import datetime
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
sns.set(font_scale=1.6)
from sklearn.preprocessing import StandardScaler
# ### 1. Preparing dataset for preprocessing
#
# In this part I will load data, briefly review the variables and prepare the 'y' value that will describe each loan as good or bad.
# In[2]:
data=pd.read_csv('../input/loan.csv',parse_dates=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
# In[3]:
data.shape
# In[4]:
data.head()
# In[5]:
pd.value_counts(data.loan_status).to_frame().reset_index()
# There are 9 unique loan statuses. I will drop the ones that are fully paid, as these are historical entries. The next step will be to assign 0 (good) to Current loans and 1 (bad) to the rest, including default and late loans, loans that were charged off, and loans in a grace period.
#
# The first two are self-explanatory. A charged-off loan is a debt that is deemed unlikely to be collected by the creditor, although the debt is not necessarily forgiven or written off entirely. A grace period is a provision in most loan contracts that allows payment to be received for a certain period of time after the actual due date.
# In[6]:
data = data[data.loan_status != 'Fully Paid']
data = data[data.loan_status != 'Does not meet the credit policy. Status:Fully Paid']
# In[7]:
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
# In[8]:
pd.value_counts(data.rating).to_frame()
# In[9]:
print ('Bad Loan Ratio: %.2f%%' % (data.rating.sum()/len(data)*100))
# The data is strongly imbalanced; however, there are over 75 thousand bad loans, which should suffice for a model to learn.
# In[10]:
data.info()
# ### 2. Reviewing variables - drop and edit
#
# In this part I will review each non-numerical variable to either edit or drop it.
# There are two columns that describe the reason for the loan - title and purpose. As shown below, title has many more categories, which makes it less specific and less helpful for the model, so it will be dropped.
# In[11]:
pd.value_counts(data.title).to_frame()
# In[12]:
pd.value_counts(data.purpose).to_frame()
# The application type variable shows whether the loan is individual or joint - the small number of joint loans explains the huge number of NaN values in the variables dedicated to these loans.
#
# Will change this variable to binary.
# In[13]:
pd.value_counts(data.application_type).to_frame()
# In[14]:
app_type={'INDIVIDUAL':0,'JOINT':1}
data.application_type.replace(app_type,inplace=True)
# In[15]:
pd.value_counts(data.term).to_frame()
# Term variable will be changed to numerical.
# In[16]:
term={' 36 months':36,' 60 months':60}
data.term.replace(term,inplace=True)
# The following two variables are dedicated to the credit rating of each individual. Will change them to numerical while making sure that the hierarchy is taken into account. The lowest number will mean the best grade/subgrade.
# In[17]:
pd.value_counts(data.grade).to_frame()
# In[18]:
grade=data.grade.unique()
grade.sort()
grade
# In[19]:
for x,e in enumerate(grade):
data.grade.replace(to_replace=e,value=x,inplace=True)
# In[20]:
data.grade.unique()
# In[21]:
pd.value_counts(data.sub_grade).to_frame()
# In[22]:
sub_grade=data.sub_grade.unique()
sub_grade.sort()
sub_grade
# In[23]:
for x,e in enumerate(sub_grade):
data.sub_grade.replace(to_replace=e,value=x,inplace=True)
data.sub_grade.unique()
# The following two variables describe the title and length of employment. Title has 212 thousand categories, so it will be dropped. Length of employment should be sufficient to show whether an individual has a stable job.
# In[24]:
pd.value_counts(data.emp_title).to_frame()
# In[25]:
pd.value_counts(data.emp_length).to_frame()
# In[26]:
emp_len={'n/a':0,'< 1 year':1,'1 year':2,'2 years':3,'3 years':4,'4 years':5,'5 years':6,'6 years':7,'7 years':8,'8 years':9,'9 years':10,'10+ years':11}
data.emp_length.replace(emp_len,inplace=True)
data.emp_length=data.emp_length.replace(np.nan,0)
data.emp_length.unique()
# The home ownership variable should be informative for the model, as individuals who own their home should be much safer clients than ones who only rent.
# In[27]:
pd.value_counts(data.home_ownership).to_frame()
# The verification status variable indicates whether the source of income of a client was verified.
# In[28]:
pd.value_counts(data.verification_status).to_frame()
# Payment plan variable will be dropped as it has only 3 'y' values.
# In[29]:
pd.value_counts(data.pymnt_plan).to_frame()
# Zip code information is too specific (there are 930 individual values), and there is no sense in making it more general by cutting it to two digits, as this would only describe the state, which the next variable already does. Zip code will be dropped.
# In[30]:
pd.value_counts(data.zip_code).to_frame()
# In[31]:
pd.value_counts(data.addr_state).to_frame()
# The next variable is the initial listing status of the loan. Possible values are W and F, and it will be changed to binary.
# In[32]:
pd.value_counts(data.initial_list_status).to_frame()
# In[33]:
int_status={'w':0,'f':1}
data.initial_list_status.replace(int_status,inplace=True)
# Policy code has only 1 value so will be dropped.
# In[34]:
pd.value_counts(data.policy_code).to_frame()
# The recoveries variable informs about the post-charge-off gross recovery. Will transform this to a binary variable that shows whether the loan was recovered. Will drop the recovery fee as it duplicates similar information.
# In[35]:
pd.value_counts(data.recoveries).to_frame()
# In[36]:
data['recovery'] = np.where((data.recoveries != 0.00), 1, 0)
# In[37]:
pd.value_counts(data.collection_recovery_fee).to_frame()
# There are a couple of variables that can be transformed to datetime.
# In[38]:
data.issue_d = pd.to_datetime(data.issue_d)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined and the values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
| tm.assert_numpy_array_equal(result.values, expected) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:46:24 2018
@author: nmei
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import seaborn as sns
import numpy as np
from statsmodels.formula.api import ols#,mixedlm
from statsmodels.stats.anova import anova_lm
from utils import eta_squared,omega_squared,resample_ttest_2sample,MCPConverter
from itertools import combinations
sns.set_style('whitegrid')
sns.set_context('poster')
saving_dir = '../figures/'
df_dir = '../results/for_spss'
def post_processing(df):
feature_names = [name for name in df.columns if 'coef' in name] # the feature names with "coef" in them
feature_name_wk = feature_names[1:] # take the intercept out
working_df = df[feature_name_wk] # keep only the non-intercept coefficient columns
for name in feature_name_wk:
working_df[name] = working_df[name].apply(np.exp)
new_col_names = {name:name[:-5] for name in feature_name_wk}
working_df['model'] = 'logistic'
working_df['window'] = df['window']
working_df = working_df.rename(new_col_names,axis='columns')
df_plot = pd.melt(working_df,id_vars = ['model','window'],
value_vars = new_col_names.values())
df_plot.columns = ['Model','Window','Coefficients','Odd_Ratio']
return df_plot
def thresholding(value):
if value < 0.001:
return "***"
elif value < 0.01:
return "**"
elif value < 0.05:
return "*"
else:
return "ns"
def preparation(c):
df_temp = {}
for ((window,feature),df_sub) in c.groupby(['Window','Coefficients']):
df_temp['{}_win{}_{}'.format('logistic',window,feature)] = df_sub['Odd_Ratio'].values
df_temp = pd.DataFrame(df_temp)
return df_temp
"""
Take the exponential of each of the coefficients to generate the odds ratios.
This tells you how a one-unit increase or decrease in a variable affects the odds of being high POS.
"""
if __name__ == '__main__':
results = []
aov_tables = []
##########################################################################################################################################
pos = pd.read_csv('../results/pos_logistic_statsmodel_6_features.csv')
att = pd.read_csv('../results/att_logistic_statsmodel_6_features.csv')
df = pos.copy()
df_plot = post_processing(df) # reshape the coefficient columns into long format (melt) as odds ratios
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)] # get the window
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'pos,6 features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
######
df = att.copy()
df_plot = post_processing(df)
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)]
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'att,6 features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
###################################################################################
################### 3 judgement features #########################################
###################################################################################
pos = pd.read_csv('../results/pos_logistic_statsmodel_3_1_features.csv')
att = pd.read_csv('../results/att_logistic_statsmodel_3_1_features.csv')
df = pos.copy()
df_plot = post_processing(df)
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)]
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'pos,judgment features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
######
df = att.copy()
df_plot = post_processing(df)
df_plot = df_plot[(df_plot['Window']>0) & (df_plot['Window']<4)]
df_temp = preparation(df_plot)
writer = pd.ExcelWriter(os.path.join(df_dir,'att,judgment features,odd ratio.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
# no main effect of coefficients nor interaction
###############################################################################################
############################# RT as features #################################################
###############################################################################################
pos = pd.read_csv('../results/pos_logistic_statsmodel_RT_features.csv')
att = | pd.read_csv('../results/att_logistic_statsmodel_RT_features.csv') | pandas.read_csv |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from bisect import bisect_left, bisect_right
import itertools
import logging
from typing import Any, Dict, Iterable, Mapping, Sequence, Tuple, Union
import warnings
import numpy as np
import pandas as pd
from .misc import ValIterOrderedDict
from .resample import (
AggregationPolicy,
AlignPolicy,
MissingValuePolicy,
get_gcd_timedelta,
granularity_str_to_seconds,
reindex_df,
to_pd_datetime,
)
logger = logging.getLogger(__name__)
class UnivariateTimeSeries(pd.Series):
"""
Please read the `tutorial <examples/TimeSeries>` before reading this API doc.
This class is a time-indexed ``pd.Series`` which represents a univariate
time series. For the most part, it supports all the same features as
``pd.Series``, with the following key differences to iteration and indexing:
1. Iterating over a `UnivariateTimeSeries` is implemented as
.. code-block:: python
for timestamp, value in univariate:
# do stuff...
where ``timestamp`` is a Unix timestamp, and ``value`` is the
corresponding time series value.
2. Integer index: ``u[i]`` yields the tuple ``(u.time_stamps[i], u.values[i])``
3. Slice index: ``u[i:j:k]`` yields a new
``UnivariateTimeSeries(u.time_stamps[i:j:k], u.values[i:j:k])``
The class also supports the following additional features:
1. ``univariate.time_stamps`` returns the list of Unix timestamps, and
``univariate.values`` returns the list of the time series values. You
may access the ``pd.DatetimeIndex`` directly with ``univariate.index``
(or its ``np.ndarray`` representation with ``univariate.np_time_stamps``),
and the ``np.ndarray`` of values with ``univariate.np_values``.
2. ``univariate.concat(other)`` will concatenate the UnivariateTimeSeries
``other`` to the right end of ``univariate``.
3. ``left, right = univariate.bisect(t)`` will split the univariate at the
given timestamp ``t``.
4. ``window = univariate.window(t0, tf)`` will return the subset of the time
series occurring between timestamps ``t0`` (inclusive) and ``tf``
(non-inclusive)
5. ``series = univariate.to_pd()`` will convert the `UnivariateTimeSeries`
into a regular ``pd.Series`` (for compatibility).
6. ``univariate = UnivariateTimeSeries.from_pd(series)`` uses a time-indexed
``pd.Series`` to create a `UnivariateTimeSeries` object directly.
.. document special functions
.. automethod:: __getitem__
.. automethod:: __iter__
"""
def __init__(
self,
time_stamps: Union[None, Sequence[Union[int, float]]],
values: Sequence[float],
name: str = None,
freq="1h",
):
"""
:param time_stamps: a sequence of Unix timestamps. You may specify
``None`` if you only have ``values`` with no specific time stamps.
:param values: a sequence of univariate values, where ``values[i]``
occurs at time ``time_stamps[i]``
:param name: the name of the univariate time series
:param freq: if ``time_stamps`` is not provided, the univariate is
assumed to be sampled at frequency ``freq``. ``freq`` may be a
string (e.g. ``"1h"``), timedelta, or ``int``/``float`` (in units
of seconds).
"""
is_pd = isinstance(values, pd.Series)
if name is None and is_pd:
name = values.name
if is_pd and isinstance(values.index, pd.DatetimeIndex):
super().__init__(values, name=name)
else:
if time_stamps is None:
if isinstance(freq, (int, float)):
freq = pd.to_timedelta(freq, unit="s")
else:
freq = pd.to_timedelta(freq)
if is_pd and values.index.dtype in ("int64", "float64"):
index = values.index * freq + pd.to_datetime(0)
else:
index = pd.date_range(start=0, periods=len(values), freq=freq)
else:
index = to_pd_datetime(time_stamps)
super().__init__(np.asarray(values), index=index, name=name, dtype=float)
if len(self) >= 3 and self.index.freq is None:
self.index.freq = | pd.infer_freq(self.index) | pandas.infer_freq |
# -- coding: utf-8 --
import tensorflow as tf
import pandas as pd
import numpy as np
import csv
file_path=r'/Users/guojianzou/PycharmProjects/OD/data/Order_all.csv'
save_path=r'/Users/guojianzou/PycharmProjects/OD/data/data_all.csv'
train_path=r'/Users/guojianzou/PycharmProjects/OD/data/train_data.csv'
combine_path=r'/Users/guojianzou/PycharmProjects/OD/data/combine_data.csv'
data_colum=["ZoneID","Area","Slon","Slat","Elon","Elat","day","hour","min","second"]
def data_save(file_path,save_pave):
'''
:param file_path: path of the source order CSV file
:param save_pave: destination path for the processed data
:return: pd.DataFrame
'''
data = | pd.read_csv(file_path, encoding='utf-8') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import os
import argparse
import types
import pandas as pd
import numpy as np
from pdsql import mssql
from datetime import datetime
import yaml
import itertools
import lowflows as lf
import util
pd.options.display.max_columns = 10
run_time_start = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
try:
#####################################
### Read parameters file
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters-test.yml')) as param:
param = yaml.safe_load(param)
# parser = argparse.ArgumentParser()
# parser.add_argument('yaml_path')
# args = parser.parse_args()
#
# with open(args.yaml_path) as param:
# param = yaml.safe_load(param)
## Integrety checks
use_types_check = np.in1d(list(param['misc']['use_types_codes'].keys()), param['misc']['use_types_priorities']).all()
if not use_types_check:
raise ValueError('use_type_priorities parameter does not encompass all of the use type categories. Please fix the parameters file.')
#####################################
### Read the hydro log
# max_date_stmt = "select max(RunTimeStart) from " + param.log_table + " where HydroTable='" + param.process_name + "' and RunResult='pass' and ExtSystem='" + param.ext_system + "'"
#
# last_date1 = mssql.rd_sql(server=param.hydro_server, database=param.hydro_database, stmt=max_date_stmt).loc[0][0]
#
# if last_date1 is None:
# last_date1 = '1900-01-01'
# else:
# last_date1 = str(last_date1.date())
#
# print('Last sucessful date is ' + last_date1)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for i, p in param['source data'].items():
setattr(db, i, mssql.rd_sql(p['server'], p['database'], p['table'], p['col_names'], rename_cols=p['rename_cols'], username=p['username'], password=p['password']))
if (p['database'] == 'Accela') & (not (p['table'] in ['Ecan.vAct_Water_AssociatedPermits', 'Ecan.vQA_Relationship_Actuals'])):
table1 = 'Accela.' + p['table'].split('Ecan.')[1]
print(table1)
t1 = getattr(db, i).copy().dropna(subset=p['pk'])
t1.drop_duplicates(p['pk'], inplace=True)
print('update in db')
new_ones, _ = mssql.update_from_difference(t1, param['output']['server'], param['output']['database'], table1, on=p['pk'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
######################################
### Populate base tables
print('--Update base tables')
## HydroGroup
hf1 = pd.DataFrame(param['misc']['HydroGroup'])
hf1['ModifiedDate'] = run_time_start
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf_diff1 = hf1[~hf1.HydroGroup.isin(hf0.HydroGroup)]
if not hf_diff1.empty:
mssql.to_mssql(hf_diff1, param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
## Activity
act1 = param['misc']['Activities']['ActivityType']
act2 = pd.DataFrame(list(itertools.product(act1, hf0.HydroGroupID.tolist())), columns=['ActivityType', 'HydroGroupID'])
act2['ModifiedDate'] = run_time_start
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act_diff1 = act2[~act2[['ActivityType', 'HydroGroupID']].isin(act0[['ActivityType', 'HydroGroupID']]).any(axis=1)]
if not act_diff1.empty:
mssql.to_mssql(act_diff1, param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
# Combine activity and hydro features
act_types1 = pd.merge(act0[['ActivityID', 'ActivityType', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID')
act_types1['ActivityName'] = act_types1['ActivityType'] + ' ' + act_types1['HydroGroup']
## AlloBlock
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
sw_blocks1 = pd.Series(db.wap_allo['sw_allo_block'].unique())
gw_blocks1 = pd.Series(db.allocated_volume['allo_block'].unique())
# Fixes
wap_allo1 = db.wap_allo.copy()
wap_allo1['sw_allo_block'] = wap_allo1['sw_allo_block'].str.strip()
wap_allo1.loc[wap_allo1.sw_allo_block == 'Migration: Not Classified', 'sw_allo_block'] = 'A'
allo_vol1 = db.allocated_volume.copy()
allo_vol1['allo_block'] = allo_vol1['allo_block'].str.strip()
allo_vol1.loc[allo_vol1.allo_block == 'Migration: Not Classified', 'allo_block'] = 'A'
# Determine blocks and what needs to be added
sw_blocks1 = set(wap_allo1['sw_allo_block'].unique())
gw_blocks1 = set(allo_vol1['allo_block'].unique())
blocks1 = sw_blocks1.union(gw_blocks1)
ab1 = pd.DataFrame(list(itertools.product(blocks1, hf0.HydroGroupID.tolist())), columns=['AllocationBlock', 'HydroGroupID'])
ab1['ModifiedDate'] = run_time_start
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab_diff1 = ab1[~ab1[['AllocationBlock', 'HydroGroupID']].isin(ab0[['AllocationBlock', 'HydroGroupID']]).any(axis=1)]
if not ab_diff1.empty:
mssql.to_mssql(ab_diff1, param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
# Combine alloblock and hydro features
ab_types1 = pd.merge(ab0[['AlloBlockID', 'AllocationBlock', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID').drop('HydroGroupID', axis=1)
## Attributes
att1 = pd.DataFrame(param['misc']['Attributes'])
att1['ModifiedDate'] = run_time_start
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att_diff1 = att1[~att1.Attribute.isin(att0.Attribute)]
if not att_diff1.empty:
mssql.to_mssql(att_diff1, param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
##################################################
### Sites and streamdepletion
print('--Update sites tables')
## takes
wap_allo1['WAP'] = wap_allo1['WAP'].str.strip().str.upper()
wap_allo1.loc[~wap_allo1.WAP.str.contains('[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap1 = wap_allo1['WAP'].unique()
wap1 = wap1[~pd.isnull(wap1)]
## Diverts
div1 = db.divert.copy()
div1['WAP'] = div1['WAP'].str.strip().str.upper()
div1.loc[~div1.WAP.str.contains('[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap2 = div1['WAP'].unique()
wap2 = wap2[~pd.isnull(wap2)]
## Combo
waps = np.concatenate((wap1, wap2), axis=None)
## Check that all WAPs exist in the USM sites table
usm_waps1 = db.sites[db.sites.ExtSiteID.isin(waps)].copy()
usm_waps1[['NZTMX', 'NZTMY']] = usm_waps1[['NZTMX', 'NZTMY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.ExtSiteID))
print('Missing {} WAPs in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.WAP.isin(miss_waps)].copy()
## Update ConsentsSites table
cs1 = usm_waps1[['ExtSiteID', 'SiteName']].copy()
# cs1['SiteType'] = 'WAP'
new_sites, _ = mssql.update_from_difference(cs1, param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentsSites', 'pass', '{} sites updated'.format(len(new_sites)), username=param['output']['username'], password=param['output']['password'])
cs0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])
cs_waps2 = pd.merge(cs0, usm_waps1.drop('SiteName', axis=1), on='ExtSiteID')
cs_waps3 = pd.merge(cs_waps2, db.wap_sd, on='ExtSiteID').drop('ExtSiteID', axis=1).round()
new_waps, _ = mssql.update_from_difference(cs_waps3, param['output']['server'], param['output']['database'], 'SiteStreamDepletion', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'WAP', 'pass', '{} sites updated'.format(len(new_waps)), username=param['output']['username'], password=param['output']['password'])
## Read db table
# wap0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'SiteStreamDepletion')
## Make linked WAP-SiteID table
wap_site = cs0.rename(columns={'ExtSiteID': 'WAP'})
##################################################
### Permit table
print('--Update Permit table')
## Clean data
permits1 = db.permit.copy()
permits1['RecordNumber'] = permits1['RecordNumber'].str.strip().str.upper()
permits1['ConsentStatus'] = permits1['ConsentStatus'].str.strip()
permits1['EcanID'] = permits1['EcanID'].str.strip().str.upper()
permits1['FromDate'] = pd.to_datetime(permits1['FromDate'], infer_datetime_format=True, errors='coerce')
permits1['ToDate'] = pd.to_datetime(permits1['ToDate'], infer_datetime_format=True, errors='coerce')
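# Consents with status 'Issued - s124 Continuance' are given a nominal 30-year term from their start date.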
permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'ToDate'] = permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'FromDate'] + pd.DateOffset(years=30)
permits1[['NZTMX', 'NZTMY']] = permits1[['NZTMX', 'NZTMY']].round()
permits1.loc[(permits1['FromDate'] < '1950-01-01'), 'FromDate'] = np.nan
permits1.loc[(permits1['ToDate'] < '1950-01-01'), 'ToDate'] = np.nan
## Filter data
permits2 = permits1.drop_duplicates('RecordNumber')
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NZTMX.notnull() & permits2.NZTMY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2['FromDate'] = permits2['FromDate'].dt.date
permits2['ToDate'] = permits2['ToDate'].dt.date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = '1900-01-01'
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = '1900-01-01'
## Save results
new_permits, _ = mssql.update_from_difference(permits2, param['output']['server'], param['output']['database'], 'Permit', on='RecordNumber', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Permit', 'pass', '{} rows updated'.format(len(new_permits)), username=param['output']['username'], password=param['output']['password'])
## Read db table
permits0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Permit', username=param['output']['username'], password=param['output']['password'])
##################################################
### Parent-Child
print('--Update Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
pc1['ParentRecordNumber'] = pc1['ParentRecordNumber'].str.strip().str.upper()
pc1['ChildRecordNumber'] = pc1['ChildRecordNumber'].str.strip().str.upper()
pc1['ParentCategory'] = pc1['ParentCategory'].str.strip()
pc1['ChildCategory'] = pc1['ChildCategory'].str.strip()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys
crc1 = permits0.RecordNumber.unique()
pc2 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
## Save results
new_pc, _ = mssql.update_from_difference(pc2, param['output']['server'], param['output']['database'], 'ParentChild', on=['ParentRecordNumber', 'ChildRecordNumber'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ParentChild', 'pass', '{} rows updated'.format(len(new_pc)), username=param['output']['username'], password=param['output']['password'])
## Read db table
pc0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ParentChild', username=param['output']['username'], password=param['output']['password'])
#################################################
### AllocatedRatesVolumes
print('--Update Allocation tables')
attr1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', ['AttributeID', 'Attribute'], username=param['output']['username'], password=param['output']['password'])
## Rates
# Clean data
wa1 = wap_allo1.copy()
wa1['RecordNumber'] = wa1['RecordNumber'].str.strip().str.upper()
wa1['take_type'] = wa1['take_type'].str.strip().str.title()
wa1['FromMonth'] = wa1['FromMonth'].str.strip().str.title()
wa1['ToMonth'] = wa1['ToMonth'].str.strip().str.title()
wa1['IncludeInSwAllocation'] = wa1['IncludeInSwAllocation'].str.strip().str.title()
wa1['AllocatedRate'] = pd.to_numeric(wa1['AllocatedRate'], errors='coerce').round(2)
wa1['WapRate'] = pd.to_numeric(wa1['WapRate'], errors='coerce').round(2)
wa1['VolumeDaily'] = pd.to_numeric(wa1['VolumeDaily'], errors='coerce').astype(int)
wa1['VolumeWeekly'] = pd.to_numeric(wa1['VolumeWeekly'], errors='coerce').astype(int)
wa1['Volume150Day'] = pd.to_numeric(wa1['Volume150Day'], errors='coerce').astype(int)
wa1.loc[wa1['FromMonth'] == 'Migration: Not Classified', 'FromMonth'] = 'Jul'
wa1.loc[wa1['ToMonth'] == 'Migration: Not Classified', 'ToMonth'] = 'Jun'
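# Remap calendar months onto a July-June ordering (Jul=1, ..., Jun=12), i.e. months are counted within what is presumably the water year.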
mon_mapping = {'Jan': 7, 'Feb': 8, 'Mar': 9, 'Apr': 10, 'May': 11, 'Jun': 12, 'Jul': 1, 'Aug': 2, 'Sep': 3, 'Oct': 4, 'Nov': 5, 'Dec': 6}
wa1.replace({'FromMonth': mon_mapping, 'ToMonth': mon_mapping}, inplace=True)
wa1.loc[wa1['IncludeInSwAllocation'] == 'No', 'IncludeInSwAllocation'] = False
wa1.loc[wa1['IncludeInSwAllocation'] == 'Yes', 'IncludeInSwAllocation'] = True
wa1.replace({'sw_allo_block': {'In Waitaki': 'A'}}, inplace=True)
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Filters
# wa4 = wa2[(wa2.AllocatedRate > 0)].copy()
# wa3.loc[~wa3['IncludeInSwAllocation'], ['AllocatedRate', 'SD1', 'SD2']] = 0
# wa4 = wa3.drop('IncludeInSwAllocation', axis=1).copy()
# Find the missing WAPs per consent
crc_wap_mis1 = wa4.loc[wa4.WAP.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'WAP']].drop_duplicates()
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'WAP'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'WAP'] = wap1[0]
wa4 = wa4[wa4.WAP.notnull()].copy()
wa4.rename(columns={'sw_allo_block': 'AllocationBlock'}, inplace=True)
# Distribute the months
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
# c1 = 0
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
# c1 = c1 + 1
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)
# Mean of all months
grp1 = wa5.groupby(['RecordNumber', 'take_type', 'AllocationBlock', 'WAP'])
mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD1', 'SD2']].mean().round(2)
include1 = grp1['IncludeInSwAllocation'].first()
mon_min = grp1['Month'].min()
mon_min.name = 'FromMonth'
mon_max = grp1['Month'].max()
mon_max.name = 'ToMonth'
wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()
# wa6['HydroGroup'] = 'Surface Water'
## Allocated Volume
av1 = allo_vol1.copy()
# clean data
av1['RecordNumber'] = av1['RecordNumber'].str.strip().str.upper()
av1['take_type'] = av1['take_type'].str.strip().str.title()
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].str.strip().str.title()
av1.loc[av1['IncludeInGwAllocation'] == 'No', 'IncludeInGwAllocation'] = False
av1.loc[av1['IncludeInGwAllocation'] == 'Yes', 'IncludeInGwAllocation'] = True
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].astype(bool)
# av1['AllocatedAnnualVolume'] = pd.to_numeric(av1['AllocatedAnnualVolume'], errors='coerce').astype(int)
av1['FullAnnualVolume'] = pd.to_numeric(av1['FullAnnualVolume'], errors='coerce').astype(int)
# av1.loc[av1['AllocatedAnnualVolume'] <= 0, 'AllocatedAnnualVolume'] = 0
# av1 = av1.loc[av1['AllocatedAnnualVolume'] > 0]
av1.rename(columns={'allo_block': 'AllocationBlock'}, inplace=True)
av1.drop('AllocatedAnnualVolume', axis=1, inplace=True)
av1.replace({'AllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
av1.drop_duplicates(subset=['RecordNumber', 'take_type', 'AllocationBlock'], inplace=True)
## Combine volumes with rates
wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'take_type', 'AllocationBlock'])
## Distribute the volumes by WapRate
wa8 = wa7.copy()
grp3 = wa8.groupby(['RecordNumber', 'take_type', 'AllocationBlock'])
wa8['WapRateAgg'] = grp3['WapRate'].transform('sum')
wa8['ratio'] = wa8['WapRate'] / wa8['WapRateAgg']
wa8.loc[wa8['ratio'].isnull(), 'ratio'] = 1
wa8['FullAnnualVolume'] = (wa8['FullAnnualVolume'] * wa8['ratio']).round()
wa8.drop(['WapRateAgg', 'ratio', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD2', 'WapRate'], axis=1, inplace=True)
wa8 = wa8[wa8.FullAnnualVolume >= 0].copy()
## Add in stream depletion
# wa9 = pd.merge(wa8, db.wap_sd.rename(columns={'ExtSiteID': 'WAP'}), on='WAP').drop(['SD1_NZTMX', 'SD1_NZTMY', 'SD1_30Day', 'SD2_NZTMX', 'SD2_NZTMY', 'SD2_7Day', 'SD2_30Day', 'SD2_150Day', 'SD1', 'SD2'], axis=1)
#
# wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)
# wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)
## Combine with aquifer test storativity
# aq1 = db.wap_aquifer_test.dropna(subset=['storativity']).copy()
# aq1.rename(columns={'ExtSiteID': 'WAP'}, inplace=True)
# aq2 = aq1.groupby('WAP')['storativity'].mean().dropna().reset_index()
# aq2.storativity = True
#
# wa9 = pd.merge(wa9, aq2, on='WAP', how='left')
# wa9.loc[wa9.storativity.isnull(), 'storativity'] = False
## Distribute the rates and volumes by allocation hydro group
wa8['sw_rate'] = 0
wa8['gw_rate'] = 0
wa8['sw_vol'] = 0
wa8['gw_vol'] = 0
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'AllocatedRate']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate'] - wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_vol'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
allo_list = []
for k, row in wa8.iterrows():
# print(k)
if row['IncludeInSwAllocation']:
sw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'sw_rate', 'sw_vol']].rename({'sw_rate': 'AllocatedRate', 'sw_vol': 'AllocatedAnnualVolume'})
sw1['HydroGroup'] = 'Surface Water'
allo_list.append(sw1.to_frame().T)
if row['IncludeInGwAllocation']:
gw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'gw_rate', 'gw_vol']].rename({'gw_rate': 'AllocatedRate', 'gw_vol': 'AllocatedAnnualVolume'})
gw1['HydroGroup'] = 'Groundwater'
allo_list.append(gw1.to_frame().T)
rv1 = pd.concat(allo_list)
rv1['AllocatedAnnualVolume'] = pd.to_numeric(rv1['AllocatedAnnualVolume'])
rv1['AllocatedRate'] = pd.to_numeric(rv1['AllocatedRate'])
rv1['FromMonth'] = pd.to_numeric(rv1['FromMonth'], downcast='integer')
rv1['ToMonth'] = pd.to_numeric(rv1['ToMonth'], downcast='integer')
rv1.loc[rv1['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedAnnualVolume'] == np.inf, 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedRate'].isnull(), 'AllocatedRate'] = 0
rv1.loc[rv1['AllocatedRate'] == np.inf, 'AllocatedRate'] = 0
# Cut out the fat
rv4 = rv1[(rv1['AllocatedAnnualVolume'] > 0) | (rv1['AllocatedRate'] > 0)].copy()
## Calculate missing volumes and rates
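# Fill gaps in both directions: a missing annual volume is estimated from the rate
# (presumably l/s, hence the 0.001 l->m3 factor) over the allocated months (30.42 days
# ~ average month length), and a missing rate is back-calculated from the annual volume
# over the same period.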
ann_bool = rv4.AllocatedAnnualVolume == 0
rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1))
rate_bool = rv4.AllocatedRate == 0
rv4.loc[rate_bool, 'AllocatedRate'] = (rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000)
## Convert the rates and volumes to integers
rv4['AllocatedAnnualVolume'] = rv4['AllocatedAnnualVolume'].round().astype(int)
rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype(int)
## Merge tables for IDs
avr5 = pd.merge(rv4, ab_types1, on=['AllocationBlock', 'HydroGroup']).drop(['AllocationBlock', 'HydroGroup'], axis=1).copy()
avr6 = pd.merge(avr5, wap_site, on='WAP').drop('WAP', axis=1)
## Update CrcAlloSite table
crc_allo = avr6[['RecordNumber', 'AlloBlockID', 'SiteID']].copy()
crc_allo['SiteAllo'] = True
crc_allo['SiteType'] = 'WAP'
## Determine which rows should be updated
# old_crc_allo = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', where_in={'SiteAllo': [1], 'SiteType': ['WAP']})
#
# diff_dict = mssql.compare_dfs(old_crc_allo.drop(['CrcAlloSiteID', 'ModifiedDate'], axis=1), crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID'])
#
# both1 = pd.concat([diff_dict['new'], diff_dict['diff']])
#
# rem1 = diff_dict['remove']
# Save results
new_crc_allo, rem_crc_allo = mssql.update_from_difference(crc_allo, param['output']['server'], param['output']['database'], 'CrcAlloSite', on=['RecordNumber', 'AlloBlockID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcAlloSite', 'pass', '{} rows updated'.format(len(new_crc_allo)), username=param['output']['username'], password=param['output']['password'])
# Read db table
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_allo.empty:
rem_crc_allo1 = pd.merge(allo_site0, rem_crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1)
mssql.del_table_rows(param['output']['server'], param['output']['database'], 'AllocatedRateVolume', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'TSLowFlowRestr', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'LowFlowConditions', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'CrcAlloSite', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Update AllocatedRateVolume table
avr7 = pd.merge(allo_site0, avr6, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1).drop_duplicates('CrcAlloSiteID')
# Save results
new_avr, _ = mssql.update_from_difference(avr7, param['output']['server'], param['output']['database'], 'AllocatedRateVolume', on='CrcAlloSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'AllocatedRateVolume', 'pass', '{} rows updated'.format(len(new_avr)), username=param['output']['username'], password=param['output']['password'])
#################################################
### ConsentedRateVolume
print('--Update Consent tables')
## Clean data
crv1 = db.consented_takes.copy()
crv1['RecordNumber'] = crv1['RecordNumber'].str.strip().str.upper()
crv1['take_type'] = crv1['take_type'].str.strip().str.title()
crv1['LowflowCondition'] = crv1['LowflowCondition'].str.strip().str.upper()
crv1['ConsentedAnnualVolume'] = pd.to_numeric(crv1['ConsentedAnnualVolume'], errors='coerce').round()
crv1['ConsentedMultiDayVolume'] = pd.to_numeric(crv1['ConsentedMultiDayVolume'], errors='coerce').round()
crv1['ConsentedMultiDayPeriod'] = pd.to_numeric(crv1['ConsentedMultiDayPeriod'], errors='coerce').round()
crv1['ConsentedRate'] = pd.to_numeric(crv1['ConsentedRate'], errors='coerce')
crv1.loc[crv1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
crv1.loc[crv1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
crv1.loc[crv1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
crv1.loc[crv1['ConsentedAnnualVolume'] <= 0, 'ConsentedAnnualVolume'] = np.nan
crv1.loc[crv1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
crv1.loc[(crv1['LowflowCondition'] == 'COMPLEX'), 'LowflowCondition'] = 'YES'
crv1.loc[crv1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
crv1.loc[crv1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
## Filter data
crv2 = crv1[crv1.ConsentedRate.notnull()]
## Check foreign keys
crv2 = crv2[crv2.RecordNumber.isin(crc1)].copy()
## Aggregate take types for counts and min/max month
grp4 = wa4.groupby(['RecordNumber', 'take_type', 'WAP'])
mon_min = grp4['FromMonth'].min()
mon_min.name = 'FromMonth'
mon_max = grp4['ToMonth'].max()
mon_max.name = 'ToMonth'
mon_min_max = pd.concat([mon_min, mon_max], axis=1)
mon_min_max1 = mon_min_max.reset_index()
grp5 = mon_min_max1.groupby(['RecordNumber', 'take_type'])
mon_min_max1['wap_count'] = grp5['WAP'].transform('count')
## Distribute WAPs to consents
crv3 = pd.merge(crv2, mon_min_max1, on=['RecordNumber', 'take_type'])
crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']] = crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']].divide(crv3['wap_count'], 0).round()
crv3['ConsentedRate'] = crv3['ConsentedRate'].divide(crv3['wap_count'], 0).round(2)
## Convert take types to ActivityID
take_types1 = act_types1[act_types1.ActivityType == 'Take'].copy()
crv4 = pd.merge(crv3.drop('wap_count', axis=1), take_types1[['ActivityID', 'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)
## Convert WAPs to SiteIDs
crv5 = pd.merge(crv4, wap_site, on='WAP').drop('WAP', axis=1)
## Create CrcActSite table
crc_act = crv5[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act['SiteActivity'] = True
crc_act['SiteType'] = 'WAP'
# Save results
new_crc_act, rem_crc_act = mssql.update_from_difference(crc_act, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_act)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_act.empty:
rem_crc_act1 = pd.merge(act_site0, rem_crc_act, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)
del_stmt = "delete from {table} where {col} in ({val})"
# del_stmt1 = del_stmt.format(table='ConsentedAttributes', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt1, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2a = del_stmt.format(table='LinkedPermits', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2a, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2b = del_stmt.format(table='LinkedPermits', col='OtherCrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2b, username=param['output']['username'], password=param['output']['password'])
del_stmt3 = del_stmt.format(table='ConsentedRateVolume', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt3, username=param['output']['username'], password=param['output']['password'])
# del_stmt4 = del_stmt.format(table='CrcActSite', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt4, username=param['output']['username'], password=param['output']['password'])
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Create ConsentedRateVolume table
crv6 = pd.merge(crv5, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1)
# Save results
new_crv, _ = mssql.update_from_difference(crv6, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Diverts
## Clean
div1 = db.divert.copy()
div1['RecordNumber'] = div1['RecordNumber'].str.strip().str.upper()
div1['DivertType'] = div1['DivertType'].str.strip().str.title()
div1['LowflowCondition'] = div1['LowflowCondition'].str.strip().str.upper()
div1['ConsentedMultiDayVolume'] = pd.to_numeric(div1['ConsentedMultiDayVolume'], errors='coerce').round()
div1['ConsentedMultiDayPeriod'] = pd.to_numeric(div1['ConsentedMultiDayPeriod'], errors='coerce').round()
div1['ConsentedRate'] = pd.to_numeric(div1['ConsentedRate'], errors='coerce').round(2)
div1.loc[div1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
div1.loc[div1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
div1.loc[div1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
div1.loc[div1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
div1.loc[(~div1['LowflowCondition'].isin(['NO', 'YES'])), 'LowflowCondition'] = 'YES'
div1.loc[div1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
div1.loc[div1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
div1['WAP'] = div1['WAP'].str.strip().str.upper()
div1.loc[~div1.WAP.str.contains('[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
## Filter
div2 = div1[div1.WAP.notnull()]
## Check foreign keys
div2 = div2[div2.RecordNumber.isin(crc1)].copy()
## Check primary keys
div2 = div2.drop_duplicates(['RecordNumber', 'WAP'])
## Join to get the IDs and filter WAPs
div3 = pd.merge(div2, act_types1[['ActivityID', 'ActivityName']], left_on='DivertType', right_on='ActivityName').drop(['DivertType', 'ActivityName'], axis=1)
div3 = pd.merge(div3, wap_site, on='WAP').drop('WAP', axis=1)
## CrcActSite
crc_act_div = div3[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act_div['SiteActivity'] = True
crc_act_div['SiteType'] = 'WAP'
# Save results
new_crc_div, rem_crc_div = mssql.update_from_difference(crc_act_div, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## ConsentedRateVolume
crc_div = pd.merge(div3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1).dropna(subset=['ConsentedRate', 'ConsentedMultiDayVolume'], how='all')
crc_div['FromMonth'] = 1
crc_div['ToMonth'] = 12
# Save results
new_crc_div, _ = mssql.update_from_difference(crc_div, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Water use types
wu1 = db.water_use.copy()
## Clean
wu1['RecordNumber'] = wu1['RecordNumber'].str.strip().str.upper()
wu1['UseType'] = wu1['UseType'].str.strip().str.title()
wu1['ConsentedMultiDayVolume'] = | pd.to_numeric(wu1['ConsentedMultiDayVolume'], errors='coerce') | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 24 19:34:09 2017
@author: daniel
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import json
import io
import scipy.optimize as opt
def fitfunc(x, A):
return A*x
plot_dict={}
f=plt.figure()
propagation_dir="propagation"
s_params = np.loadtxt(os.path.join(propagation_dir, "S_parameters_Delay007.tsv"))
polar_fit = np.loadtxt(os.path.join(propagation_dir, "PolarAngleFit.txt"))
polar = np.loadtxt(os.path.join(propagation_dir, "PolarAngle.txt"),skiprows=233)
erf1 = np.loadtxt(os.path.join(propagation_dir, "Erf-fit1.txt"))
erf2 = np.loadtxt(os.path.join(propagation_dir, "Erf-fit2.txt"))
#h3_err=pd.DataFrame(index=densitys/1e18,data=phases[:,2])
#
#h4=pd.DataFrame(index=densitys/1e18,data=phases[:,3])
#h4_err=pd.DataFrame(index=densitys/1e18,data=phases[:,4])
#
#
#popt, pcov = opt.curve_fit(fitfunc, densitys/1e18, phases[:,1])
#popt2, pcov2 = opt.curve_fit(fitfunc, densitys/1e18, phases[:,3])
#popt3, pcov3 = opt.curve_fit(fitfunc, densitys/1e18, phases[:,5])
#
#ds=np.linspace(0,2.2,100)
#
h=pd.DataFrame(index=erf1[:,0],data=erf1[:,1])
h2=pd.DataFrame(index=erf2[:,0],data=erf2[:,1])
h3=pd.DataFrame(index=s_params[:,0],data=s_params[:,7])
h3_err=pd.DataFrame(index=s_params[:,0],data=np.sqrt(s_params[:,7]))
h4=pd.DataFrame(index=s_params[:,0],data=s_params[:,8])
h4_err=pd.DataFrame(index=s_params[:,0],data=np.sqrt(s_params[:,8]))
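# Bundle the JSON-serialised series for export, keyed by matplotlib subplot code
# ('121' corresponds to the plt.subplot(121) panel drawn below).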
plot_dict['121']={
'A':{'type':'plot','y':h[0].to_json()},
'B':{'type':'plot','y':h2[0].to_json()},
'C':{'type':'errorbar','y':h3[0].to_json(),'yerr':h3_err[0].to_json(),'ylabel':u'Ereignisse','xlabel':u'Zeit ($\mu s$)','num':'a','xlim':(11.,14.5),'ylim':(0,80),'label':'Daten'},
'D':{'type':'errorbar','y':h4[0].to_json(),'yerr':h4_err[0].to_json()}
}
plt.subplot(121)
plt.plot(erf1[:,0], erf1[:,1], ls='-',lw=1.5,c='r')
plt.plot(erf2[:,0], erf2[:,1], ls='-',lw=1.5,c='b')
plt.scatter(s_params[:,0], s_params[:,7], marker='o',c='r')
plt.scatter(s_params[:,0], s_params[:,8], marker='o',c='b')
plt.xlim((11.,14.5))
start=47
polar_start = 8
h= | pd.DataFrame(index=polar[:start,0],data=polar[:start,1]) | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from collections.abc import Iterable  # 'Iterable' lives in collections.abc (removed from 'collections' in Python 3.10)
ALLOWED_TIME_COLUMN_TYPES = [
pd.Timestamp,
pd.DatetimeIndex,
datetime.datetime,
datetime.date,
]
def is_datetime_like(x):
"""Function that checks if a data frame column x is of a datetime type."""
return any(isinstance(x, col_type) for col_type in ALLOWED_TIME_COLUMN_TYPES)
def get_datetime_col(df, datetime_colname):
"""
Helper function for extracting the datetime column as datetime type from
a data frame.
Args:
df: pandas DataFrame containing the column to convert
datetime_colname: name of the column to be converted
Returns:
pandas.Series: converted column
Raises:
Exception: if datetime_colname does not exist in the dateframe df.
Exception: if datetime_colname cannot be converted to datetime type.
"""
if datetime_colname in df.index.names:
datetime_col = df.index.get_level_values(datetime_colname)
elif datetime_colname in df.columns:
datetime_col = df[datetime_colname]
else:
raise Exception("Column or index {0} does not exist in the data " "frame".format(datetime_colname))
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(df[datetime_colname])
return datetime_col
def get_month_day_range(date):
"""
Returns the first date and last date of the month of the given date.
"""
# Replace the date in the original timestamp with day 1
first_day = date + relativedelta(day=1)
# Replace the date in the original timestamp with day 1
# Add a month to get to the first day of the next month
# Subtract one day to get the last day of the current month
last_day = date + relativedelta(day=1, months=1, days=-1, hours=23)
return first_day, last_day
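# Example (illustrative): get_month_day_range(pd.Timestamp("2023-02-10")) returns
# (Timestamp("2023-02-01 00:00:00"), Timestamp("2023-02-28 23:00:00")).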
def split_train_validation(df, fct_horizon, datetime_colname):
"""
Splits the input dataframe into train and validate folds based on the
forecast creation time (fct) and forecast horizon specified by fct_horizon.
Args:
df: The input data frame to split.
fct_horizon: list of tuples in the format of
(fct, (forecast_horizon_start, forecast_horizon_end))
datetime_colname: name of the datetime column
Note: df[datetime_colname] needs to be a datetime type.
"""
i_round = 0
for fct, horizon in fct_horizon:
i_round += 1
train = df.loc[df[datetime_colname] < fct].copy()
validation = df.loc[(df[datetime_colname] >= horizon[0]) & (df[datetime_colname] <= horizon[1]),].copy()
yield i_round, train, validation
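# Example (illustrative): with fct_horizon = [(pd.Timestamp("2014-02-01"),
# (pd.Timestamp("2014-02-01"), pd.Timestamp("2014-02-28")))], the single yielded fold
# trains on rows strictly before 2014-02-01 and validates on rows dated
# 2014-02-01 through 2014-02-28 inclusive.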
def add_datetime(input_datetime, unit, add_count):
"""
Function to add a specified units of time (years, months, weeks, days,
hours, or minutes) to the input datetime.
Args:
input_datetime: datetime to be added to
unit: unit of time to add, valid values: 'Y' (year), 'M' (month),
'W' (week), 'D' (day), 'h' (hour), 'm' (minute).
add_count: number of units to add
Returns:
New datetime after adding the time difference to the input datetime.
Raises:
Exception: if an invalid unit is provided. Valid units are:
'Y', 'M', 'W', 'D', 'h', and 'm'.
"""
if unit == "Y":
new_datetime = input_datetime + relativedelta(years=add_count)
elif unit == "M":
new_datetime = input_datetime + relativedelta(months=add_count)
elif unit == "W":
new_datetime = input_datetime + relativedelta(weeks=add_count)
elif unit == "D":
new_datetime = input_datetime + relativedelta(days=add_count)
elif unit == "h":
new_datetime = input_datetime + relativedelta(hours=add_count)
elif unit == "m":
new_datetime = input_datetime + relativedelta(minutes=add_count)
else:
raise Exception(
"Invalid backtest step unit, {}, provided. Valid " "step units are Y, M, W, D, h, " "and m".format(unit)
)
return new_datetime
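# Example (illustrative): add_datetime(datetime.datetime(2023, 1, 31), "M", 1)
# returns datetime.datetime(2023, 2, 28, 0, 0) -- relativedelta clips to the end
# of the shorter month.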
def convert_to_tsdf(input_df, time_col_name, time_format):
"""
Convert a time column in a data frame to monotonically increasing time
index.
Args:
input_df(pandas.DataFrame): Input data frame to convert.
time_col_name(str): Name of the time column to use as index.
time_format(str): Format of the time column.
Returns:
pandas.DataFrame: A new data frame with the time column of the input
data frame set as monotonically increasing index.
"""
output_df = input_df.copy()
if not is_datetime_like(output_df[time_col_name]):
output_df[time_col_name] = pd.to_datetime(output_df[time_col_name], format=time_format)
output_df.set_index(time_col_name, inplace=True)
if not output_df.index.is_monotonic:
output_df.sort_index(inplace=True)
return output_df
def is_iterable_but_not_string(obj):
"""
Determine if an object has iterable, list-like properties.
Importantly, this functions *does not* consider a string
to be list-like, even though Python strings are iterable.
"""
return isinstance(obj, Iterable) and not isinstance(obj, str)
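# Example (illustrative): is_iterable_but_not_string([1, 2, 3]) -> True,
# is_iterable_but_not_string("abc") -> False.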
def get_offset_by_frequency(frequency):
frequency_to_offset_map = {
"B": pd.offsets.BDay(),
"C": pd.offsets.CDay(),
"W": pd.offsets.Week(),
"WOM": pd.offsets.WeekOfMonth(),
"LWOM": pd.offsets.LastWeekOfMonth(),
"M": pd.offsets.MonthEnd(),
"MS": pd.offsets.MonthBegin(),
"BM": pd.offsets.BMonthEnd(),
"BMS": pd.offsets.BMonthBegin(),
"CBM": pd.offsets.CBMonthEnd(),
"CBMS": pd.offsets.CBMonthBegin(),
"SM": pd.offsets.SemiMonthEnd(),
"SMS": pd.offsets.SemiMonthBegin(),
"Q": pd.offsets.QuarterEnd(),
"QS": pd.offsets.QuarterBegin(),
"BQ": pd.offsets.BQuarterEnd(),
"BQS": pd.offsets.BQuarterBegin(),
"REQ": | pd.offsets.FY5253Quarter() | pandas.offsets.FY5253Quarter |
import pandas
import requests
url = 'https://reqres.in/api/users?page=2'
# Fetch the data from the API
response = requests.get(url).json()
# Extract the user records
data = response['data']
# Print each user's data
for user in data:
print(user)
# Load the user data into a DataFrame
dataframe = | pandas.DataFrame.from_records(data) | pandas.DataFrame.from_records |
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import (QInputDialog,QFileDialog,QMessageBox,QWidget)
from rev003p9_finalysing_stdf_reader import READ_THE_STDF_FILE
from excel_sheet_selector import Ui_Sheet_selector
import time,sys
import pandas as pd
from pandas import DataFrame as DF
class Ui_Load_STDF( QtWidgets.QDialog):
File_Name_to_export=QtCore.pyqtSignal(list)
def __init__(self,parent=None):
super(Ui_Load_STDF,self).__init__()
self.File_name=""
self.Parent_window=parent
self.Imported_file_data=[]
self.STDF_DATA={}
#self.Raw_Data_DF=None
'''self.LdStdf = QtWidgets.QDialog()
self.setupUi(self.LdStdf)'''
def setupUi(self, Load_STDF):
global Load_STDF1
Load_STDF1=Load_STDF
Load_STDF.setObjectName("Load_STDF")
Load_STDF.resize(581, 89)
self.Load_group_box = QtWidgets.QGroupBox(Load_STDF)
self.Load_group_box.setGeometry(QtCore.QRect(10, 0, 561, 81))
self.Load_group_box.setTitle("")
self.Load_group_box.setObjectName("groupBox")
self.Load_progressBar = QtWidgets.QProgressBar(self.Load_group_box)
self.Load_progressBar.setGeometry(QtCore.QRect(10, 50, 471, 16))
self.Load_progressBar.setProperty("value", 0)
self.Load_progressBar.setMaximum(100)
self.Load_progressBar.setObjectName("progressBar")
self.Import_button = QtWidgets.QPushButton(self.Load_group_box)
self.Import_button.setGeometry(QtCore.QRect(480, 10, 75, 23))
self.Import_button.setShortcut("Ctrl+Shift+I")
self.Import_button.setObjectName("Import_button")
self.File_name=self.Import_button.clicked.connect(self.openFileNameDialog)
#self.pushButton.clicked.connect()
self.Input_textBox = QtWidgets.QLineEdit(self.Load_group_box)
self.Input_textBox.setGeometry(QtCore.QRect(60, 10, 411, 31))
self.Input_textBox.setObjectName("Input_textBox")
self.Input_txtbx_lbl = QtWidgets.QLabel(self.Load_group_box)
self.Input_txtbx_lbl.setGeometry(QtCore.QRect(10, 20, 47, 13))
self.Input_txtbx_lbl.setObjectName("Input_txtbx_lbl")
self.Load_button = QtWidgets.QPushButton(self.Load_group_box)
self.Load_button.setGeometry(QtCore.QRect(480, 50, 75, 23))
self.Load_button.setObjectName("Load_button")
self.Load_button.clicked.connect(self.Load_Action)
self.retranslateUi(Load_STDF)
QtCore.QMetaObject.connectSlotsByName(Load_STDF)
def retranslateUi(self, Load_STDF):
_translate = QtCore.QCoreApplication.translate
Load_STDF.setWindowTitle(_translate("Load_STDF", "Load File"))
self.Import_button.setText(_translate("Load_STDF", "Import"))
self.Input_txtbx_lbl.setText(_translate("Load_STDF", "Load_File"))
self.Load_button.setText(_translate("Load_STDF", "Load"))
def openFileNameDialog(self):
'''Opens a file dialog to browse for the input data file'''
fileName=QFileDialog.getOpenFileName(caption='Open file',directory='',filter="STDF Files (*.std *.stdf);;Excel Files (*.xlsx *.xls);;CSV Files (*.csv);;All Files (*.*)")
if fileName[0]!='':
self.Input_textBox.setText(fileName[0])
self.File_name=fileName[0]
self.File_Type=fileName[1]
else:
msg='Please select a File'
k=self.Messagebox(msg,'criti','Please select a File')
return fileName[0]
def Messagebox(self,msg_text,msgtype='info',title='Message Box'):
''' Convenience wrapper for showing a PyQt message box. msgtype can be one of (info, warn, que, criti);
msg_text is the message to display and title will be the window title.'''
if msgtype=='info':
reply=QMessageBox.information(self,title,msg_text,QMessageBox.Ok ,QMessageBox.Ok)
elif msgtype=='warn':
reply=QMessageBox.warning(self,title,msg_text,QMessageBox.Ok ,QMessageBox.Ok)
elif msgtype=='que':
reply=QMessageBox.question(self,title,msg_text,QMessageBox.Yes | QMessageBox.No ,QMessageBox.Yes)
elif msgtype=='criti':
reply=QMessageBox.critical(self,title,msg_text,QMessageBox.Ok | QMessageBox.Cancel ,QMessageBox.Ok)
return reply
def Store_the_Data(self):
''' Stores the loaded data into a dictionary for ease of access'''
# self.STDF_DATA={}
#self.Load_window.File_Name_to_export.connect(self.Imported_file_data.append)
#self.Load_window.File_Name_to_export.connect(self.Ldd())
if self.Parent_window!=None:
self.Parent_window.File_Path.append(self.File_name)
self.Rec_Summary_list=['FAR_Rec_summary','ATR_Rec_summary','MIR_Rec_Summary','SDR_Rec_Summary', 'PMR_Rec_Summary','WCR_Rec_Summary'
,'WIR_Rec_Summary','PIR_Rec_Summary','PRR_Rec_Summary','MPR_Rec_Summary','WRR_Rec_Summary','TSR_Rec_Summary','HBR_Rec_Summary',
'SBR_Rec_Summary','PCR_Rec_Summary','MRR_Rec_Summary','BPS_Rec_Summary','DTR_Rec_Summary','PGR_Rec_Summary', 'RDR_Rec_Summary'
,'GDR_Rec_Summary','Test_Details','Test_Flag_Details','PTR_Rec_Summary','FTR_Rec_Summary','Full_Rec_Summary','Test_Limit_Details']
if len(self.Raw_Data.Clubbed_Record_Details)>1:
for i in range(len(self.Rec_Summary_list)):
self.STDF_DATA[self.Rec_Summary_list[i]]=self.Raw_Data.Clubbed_Record_Details[i]
if self.Parent_window!=None:
self.Parent_window.Loaded_Data_File_count.append('File_'+str(len(self.Parent_window.Loaded_Data_File_count)+1))
self.Parent_window.Loaded_Data_Files[self.Parent_window.Loaded_Data_File_count[len(self.Parent_window.Loaded_Data_File_count)-1]]=self.STDF_DATA
self.Parent_window.Loaded_Data_File_Raw_Data[self.Parent_window.Loaded_Data_File_count[len(self.Parent_window.Loaded_Data_File_count)-1]]= | DF(self.Raw_Data.Full_Rec_Summary) | pandas.DataFrame |
import tempfile
from os.path import abspath, exists, splitext
from os import remove
import geopandas as gpd
import pandas as pd
import pandas.core.algorithms as algos
import numpy as np
from shapely.geometry import Polygon
import logging
from uncoverml.config import ConfigException
BIN = 'bin'
GEOMETRY = 'geometry'
log = logging.getLogger(__name__)
def filter_fields(fields_to_keep, input_shapefile):
gdf = gpd.read_file(input_shapefile)
fields_to_keep = [GEOMETRY] + list(fields_to_keep) # add geometry
original_fields = gdf.columns
for f in fields_to_keep:
if f not in original_fields:
raise RuntimeError("field '{}' must exist in shapefile".format(f))
gdf_out = gdf[fields_to_keep]
return gdf_out
def strip_shapefile(input_shapefile, output_shapefile, *fields_to_keep):
"""
Parameters
----------
input_shapefile: str
output_shapefile: str
*fields_to_keep: str field names to keep in the output shapefile
Returns
-------
"""
gdf_out = filter_fields(fields_to_keep, input_shapefile)
gdf_out.to_file(output_shapefile)
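# --- Illustrative sketch (not part of the original module): the column selection done by
# --- filter_fields, reproduced on an in-memory GeoDataFrame so it runs without a shapefile.
# --- The field names below are hypothetical.
import geopandas as gpd
from shapely.geometry import Point

_gdf = gpd.GeoDataFrame(
    {"K_ppm": [1.0, 2.0], "Th_ppm": [3.0, 4.0], "noise": [0, 1]},
    geometry=[Point(0, 0), Point(1, 1)],
)
print(_gdf[["geometry", "K_ppm"]])  # geometry is always kept, like in filter_fields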
def resample_by_magnitude(input_shapefile, output_shapefile,
target_field, bins=10,
fields_to_keep=[], bootstrap=True,
output_samples=None,
validation_file=None,
validation_points=100):
"""
Parameters
----------
input_shapefile: str
output_shapefile: str
target_field: str
target field name based on which resampling is performed. Field must
exist in the input_shapefile
bins: int
number of bins for sampling
fields_to_keep: list
of strings to store in the output shapefile
bootstrap: bool, optional
whether to sample with replacement or not
output_samples: int, optional
number of samples in the output shapefile. If not provided, the output
samples will be assumed to be the same as the original shapefile
validation_file: str, optional
validation file name
validation_points: int, optional
approximate number of points in the validation shapefile
Returns
-------
"""
log.info("resampling shapefile by values")
if bootstrap and validation_file:
raise ValueError('bootstrapping should not be used while '
'creating a validation shapefile.')
if len(fields_to_keep):
fields_to_keep.append(target_field)
else:
fields_to_keep = [target_field]
gdf_out = filter_fields(fields_to_keep, input_shapefile)
# the idea is stolen from pandas.qcut
# pd.qcut does not work for cases when it result in non-unique bin edges
target = gdf_out[target_field].values
bin_edges = algos.quantile(
np.unique(target), np.linspace(0, 1, bins+1))
result = pd.tools.tile._bins_to_cuts(target, bin_edges,
labels=False,
include_lowest=True)
# add to output df for sampling
gdf_out[BIN] = result
dfs_to_concat = []
validation_dfs_to_concat = []
total_samples = output_samples if output_samples else gdf_out.shape[0]
samples_per_bin = total_samples // bins
validate_array = np.ones(bins, dtype=np.bool)
if validation_file and bins > validation_points:
validate_array[validation_points:] = False
np.random.shuffle(validate_array)
gb = gdf_out.groupby(BIN)
for i, (b, gr) in enumerate(gb):
if bootstrap:
dfs_to_concat.append(gr.sample(n=samples_per_bin,
replace=bootstrap))
else:
_df, v_df = _sample_without_replacement(gr, samples_per_bin,
validate_array[i])
dfs_to_concat.append(_df)
validation_dfs_to_concat.append(v_df)
final_df = | pd.concat(dfs_to_concat) | pandas.concat |
import pandas as pd
import pickle
from joblib import Parallel, delayed
N_JOBS=8
def find_all_brands(names_series, brands_re):
return names_series.str.findall(brands_re)
def find_first_brand(names_series, brands_re):
brands = find_all_brands(names_series, brands_re)
first_brand = brands.map(
lambda b: b[0] if len(b) > 0 else 'отсутствие бренда'  # 'отсутствие бренда' == "no brand"
)
return first_brand
def find_parallel(names_series, brands_re, n_jobs=N_JOBS):
n_batches = n_jobs
batches = [names_series.iloc[i::n_batches] for i in range(n_batches)]
brand = Parallel(n_jobs=n_jobs)(
delayed(find_first_brand)(batch, brands_re)
for batch in batches
)
brand = pd.concat(brand)
item_brand_mapping = | pd.concat([names_series, brand], axis=1, ignore_index=False) | pandas.concat |
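# --- Illustrative sketch (not part of the original file): iloc[i::n] splits a Series into n
# --- interleaved batches; concatenating them recovers every row (just reordered).
import pandas as pd

_s = pd.Series(list("abcdefg"))
_batches = [_s.iloc[i::3] for i in range(3)]
print(pd.concat(_batches).sort_index().equals(_s))  # True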
import pandas
import math
category2abbreviation = {
'fully supervised' : 'FS',
'knowledge-based with supervision' : 'KB (S)',
'semi-supervised' : 'Semi-S',
'unsupervised knowledge-based' : 'KB (U)'}
ordered_types = ['FS', 'KB (S)', 'KB (U)', 'Semi-S']
def system_label2color(path):
"""
map system to color
:param str path: path to 'System.xslx'
:rtype: dict
:return: system -> color
"""
df = pandas.read_excel(path)
abbr_cat2color = {
'FS': '#0173B2', # mediumblue
'KB (S)': '#DE8F05', # orange
'KB (U)': '#029E73', # green
'Semi-S': '#D55E00' # red
}
system2color = dict()
for index, row in df.iterrows():
label = row['system']
full_cat = row['category']
abbr_cat = category2abbreviation[full_cat]
color = abbr_cat2color[abbr_cat]
system2color[label] = color
return system2color
def extract_relevant_rows(df, sense_repository, the_competitions):
"""
:param pandas.DataFrame df: dataframe with wsd state of the art data
:param str sense_repository: WordNet | BabelNet
:param set the_competitions: set of allowed competitions
:rtype: tuple
:return: (set of systems, df with relevant info)
"""
rows = []
headers = df.columns
for index, row in df.iterrows():
if all([row['competition'] in the_competitions,
row['sense_repository'] == sense_repository]):
rows.append(row)
comp_df = pandas.DataFrame(rows, columns=headers)
systems = set(comp_df.label)
return (systems, comp_df)
def load_relevant_data_from_rows(comp_df, competition):
"""
extract from system with best performing setting:
a) year
b) name of system
c) f1
:param pandas.DataFrame comp_df: output 'extract_relevant_rows' function
:rtype: pandas.DataFrame
:return: df with relevant data
"""
list_of_lists = []
headers = ['Year', 'F1', 'System', 'Competition', 'In_competition']
for system in set(comp_df.label):
system_df = comp_df[comp_df.label == system]
if all([system == 'Google-LSTM',
competition in {'se2-aw', 'se2-aw-v2'}]):
continue
highest_row = system_df['F1'].idxmax()
if math.isnan(highest_row):
print(f'no F1 value for {system}')
continue
rows = system_df.loc[[highest_row]]
assert len(rows) == 1
for index, row in rows.iterrows():
pass
shortname = row['label']
setting = row['setting']
f1 = row['F1'] / 100
year = row['year']
#if type(setting) == str:
# label = f'{shortname} ({setting})'
#else:
# label = f'{shortname}'
label = f'{shortname}'
one_row = [int(year), float(f1), str(label), competition, row['in_competition']]
list_of_lists.append(one_row)
stats_df = | pandas.DataFrame(list_of_lists, columns=headers) | pandas.DataFrame |
from datetime import datetime
startTime = datetime.now()
import json
import glob
import os
import pandas as pd
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow import keras
from sklearn.model_selection import train_test_split
import requests
EPOCHS = 9
CLASSES = 2
"""
Build and return the Keras model ready to fit
"""
def build_classification_model(X_train):
model = keras.Sequential([
keras.layers.Dense(64, activation=tf.nn.sigmoid, input_shape=(X_train.shape[1],)),
keras.layers.Dense(64, activation=tf.nn.sigmoid),
keras.layers.Dense(64, activation=tf.nn.sigmoid),
keras.layers.Dense(CLASSES, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
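# --- Illustrative training sketch (not part of the original script); the feature matrix and
# --- labels below are random stand-ins, only the shapes matter.
import numpy as np

_X = np.random.rand(128, 6).astype("float32")
_y = np.random.randint(0, CLASSES, size=128)
_model = build_classification_model(_X)
_model.fit(_X, _y, epochs=1, verbose=0)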
"""
get the percent ontime of a particular airline by id
"""
def get_airline_percent_ontime(id, airlines):
for obj in airlines:
if obj['airline_id'] == id:
return obj['airline_percent_ontime_arrival']
"""
get the percent ontime of a particular airport by id
"""
def get_airport_percent_ontime(id, airports):
for obj in airports:
if obj['airport_id'] == id:
return obj['airport_percent_ontime_departure']
"""
create the classes for classifying each departure or arrival time as
on time or late
"""
def create_classes(y):
for i in range(len(y)):
if y[i] < 10:
y[i] = 0
else:
y[i] = 1
return y
"""
create the classes and split the data into training and testing
"""
def prepare_data(X, y):
y = y.tolist()
y = create_classes(y)
return train_test_split(X, y, test_size=0.2, random_state=42)
"""
Run the program to load data, create and fit the model, test, and save the model as JSON
"""
print('Getting airport and airline metadata from FlyGenius API...', end=' ')
r = requests.get('https://api.flygeni.us/airports/?use_details=True')
airports = r.json()
r = requests.get('https://api.flygeni.us/airlines/?use_details=True')
airlines = r.json()
print('done!\nLoading raw flight data from CSV files...', end=' ')
path = os.path.normpath(os.path.join(os.getcwd(), 'data/flight-data/*_2017_*/*.csv'))
all_data = glob.glob(path)
loaded_data = []
for path in all_data:
this_data = pd.read_csv(path, skipinitialspace=True, low_memory=False)
loaded_data.append(this_data)
all_df = | pd.concat(loaded_data) | pandas.concat |
import collections
import logging
import os
import pprint
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as cartif
import core.signal_processing as csigna
import helpers.git as git
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class Test__compute_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_df()
output_df = csigna._compute_lagged_cumsum(input_df, 3)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 3, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test_lag_1(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 1, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_df() -> pd.DataFrame:
df = pd.DataFrame([list(range(10))] * 3).T
df[1] = df[0] + 1
df[2] = df[0] + 2
df.index = pd.date_range(start="2010-01-01", periods=10)
df.rename(columns=lambda x: f"col_{x}", inplace=True)
return df
class Test_correlate_with_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1", "y2"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1"], x_vars=["x"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_arma_df(seed: int = 0) -> pd.DataFrame:
arma_process = cartif.ArmaProcess([], [])
date_range = {"start": "2010-01-01", "periods": 40, "freq": "M"}
srs1 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("x")
srs2 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 1
).rename("y1")
srs3 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 2
).rename("y2")
return pd.concat([srs1, srs2, srs3], axis=1)
class Test_accumulate(hut.TestCase):
def test1(self) -> None:
srs = pd.Series(
range(0, 20), index=pd.date_range("2010-01-01", periods=20)
)
actual = csigna.accumulate(srs, num_steps=1)
expected = srs.astype(float)
pd.testing.assert_series_equal(actual, expected)
def test2(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test3(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, 3, 6, 9, 12, 15, 18, 21, 24], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test4(self) -> None:
srs = pd.Series(
np.random.randn(100), index=pd.date_range("2010-01-01", periods=100)
)
output = pd.concat([srs, csigna.accumulate(srs, num_steps=5)], axis=1)
output.columns = ["series", "series_accumulated"]
self.check_string(hut.convert_df_to_string(output, index=True))
def test_long_step1(self) -> None:
idx = pd.date_range("2010-01-01", periods=3)
srs = pd.Series([1, 2, 3], index=idx)
actual = csigna.accumulate(srs, num_steps=5)
expected = pd.Series([np.nan, np.nan, np.nan], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test_nans1(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, np.nan, 2, 3, 4, np.nan, 5, 6, 7], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
9,
np.nan,
np.nan,
np.nan,
18,
],
index=idx,
)
pd.testing.assert_series_equal(actual, expected)
def test_nans2(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 9], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test_nans3(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, np.nan, np.nan, np.nan, 5, 7], index=idx)
pd.testing.assert_series_equal(actual, expected)
class Test_get_symmetric_equisized_bins(hut.TestCase):
def test_zero_in_bin_interior_false(self) -> None:
input_ = pd.Series([-1, 3])
expected = np.array([-3, -2, -1, 0, 1, 2, 3])
actual = csigna.get_symmetric_equisized_bins(input_, 1)
np.testing.assert_array_equal(actual, expected)
def test_zero_in_bin_interior_true(self) -> None:
input_ = | pd.Series([-1, 3]) | pandas.Series |
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../'))
from utilities import plot_x_y
from matplotlib import rc
import matplotlib.lines as mlines
rc('text', usetex=True)
plt.rcParams["font.family"] = "Times New Roman"
# Figure validating the diffusion numerical solution against the COMSOL reference
table_coms = pd.read_csv('../inOut/conc_coord_comsole.csv')
df_coms = pd.DataFrame(table_coms)
#
table_dfvm = | pd.read_csv('../inOut/conc_coord_dfvm.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
German bank holiday.
"""
try:
from pandas import Timedelta
from pandas.tseries.offsets import Easter, Day, Week
from pandas.tseries.holiday import EasterMonday, GoodFriday, \
Holiday, AbstractHolidayCalendar
except ImportError:
print('Pandas could not be imported')
raise
from german_holidays.state_codes import STATE_CODE_MAP, StateCodeError
class ChristiHimmelfahrt(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('39d')
return new
class Pfingstsonntag(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('49d')
return new
class Pfingstmontag(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('50d')
return new
class Fronleichnam(Easter):
def apply(*args, **kwargs):
new = | Easter.apply(*args, **kwargs) | pandas.tseries.offsets.Easter.apply |
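# --- Illustrative sketch (not part of the original module): an Easter offset rolls a date to
# --- the next Easter Sunday; adding a fixed Timedelta then yields the derived holiday
# --- (Ascension / Christi Himmelfahrt = Easter + 39 days).
import pandas as pd
from pandas.tseries.offsets import Easter

_easter = pd.Timestamp("2021-01-01") + Easter()
print(_easter)                        # 2021-04-04
print(_easter + pd.Timedelta("39d"))  # 2021-05-13, Ascension Day 2021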
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC overloads for common algorithms used internally
"""
import numpy
import pandas
from pandas.core.indexing import IndexingError
import numba
from numba.misc import quicksort
from numba import types
from numba.core.errors import TypingError
from numba.extending import register_jitable
from numba.np import numpy_support
from numba.typed import Dict
import sdc
from sdc.hiframes.api import isna
from sdc.hiframes.pd_series_type import SeriesType
from sdc.functions import numpy_like
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.range_index_type import RangeIndexType
from sdc.str_arr_ext import (num_total_chars, append_string_array_to,
str_arr_is_na, pre_alloc_string_array, str_arr_set_na, string_array_type,
cp_str_list_to_array, create_str_arr_from_list, get_utf8_size,
str_arr_set_na_by_mask)
from sdc.utilities.prange_utils import parallel_chunks
from sdc.utilities.utils import sdc_overload, sdc_register_jitable
from sdc.utilities.sdc_typing_utils import (find_common_dtype_from_numpy_dtypes,
TypeChecker)
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def hpat_arrays_append(A, B):
pass
@sdc_overload(hpat_arrays_append, jit_options={'parallel': False})
def hpat_arrays_append_overload(A, B):
"""Function for appending underlying arrays (A and B) or list/tuple of arrays B to an array A"""
A_is_range_index = isinstance(A, RangeIndexType)
B_is_range_index = isinstance(B, RangeIndexType)
if isinstance(A, (types.Array, RangeIndexType)):
if isinstance(B, (types.Array, RangeIndexType)):
def _append_single_numeric_impl(A, B):
_A = A.values if A_is_range_index == True else A # noqa
_B = B.values if B_is_range_index == True else B # noqa
return numpy.concatenate((_A, _B,))
return _append_single_numeric_impl
elif isinstance(B, (types.UniTuple, types.List)) and isinstance(B.dtype, (types.Array, RangeIndexType)):
B_dtype_is_range_index = isinstance(B.dtype, RangeIndexType)
numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])
# TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime
def _append_list_numeric_impl(A, B):
total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()
new_data = numpy.empty(total_length, numba_common_dtype)
stop = len(A)
_A = numpy.array(A) if A_is_range_index == True else A # noqa
new_data[:stop] = _A
for arr in B:
_arr = numpy.array(arr) if B_dtype_is_range_index == True else arr # noqa
start = stop
stop = start + len(_arr)
new_data[start:stop] = _arr
return new_data
return _append_list_numeric_impl
elif A == string_array_type:
if B == string_array_type:
def _append_single_string_array_impl(A, B):
total_size = len(A) + len(B)
total_chars = num_total_chars(A) + num_total_chars(B)
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
pos += append_string_array_to(new_data, pos, B)
return new_data
return _append_single_string_array_impl
elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):
def _append_list_string_array_impl(A, B):
array_list = [A] + list(B)
total_size = numpy.array([len(arr) for arr in array_list]).sum()
total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
for arr in B:
pos += append_string_array_to(new_data, pos, arr)
return new_data
return _append_list_string_array_impl
@sdc_register_jitable
def fill_array(data, size, fill_value=numpy.nan, push_back=True):
"""
Fill array with given values to reach the size
"""
if push_back:
return numpy.append(data, numpy.repeat(fill_value, size - data.size))
return numpy.append(numpy.repeat(fill_value, size - data.size), data)
@sdc_register_jitable
def fill_str_array(data, size, push_back=True):
"""
Fill StringArrayType array with given values to reach the size
"""
string_array_size = len(data)
nan_array_size = size - string_array_size
num_chars = sdc.str_arr_ext.num_total_chars(data)
result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)
# Keep NaN values of initial array
arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])
data_str_list = sdc.str_arr_ext.to_string_list(data)
nan_list = [''] * nan_array_size
result_list = data_str_list + nan_list if push_back else nan_list + data_str_list
cp_str_list_to_array(result_data, result_list)
# Batch=64 iteration to avoid threads competition
batch_size = 64
if push_back:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < string_array_size:
if arr_is_na_mask[j]:
str_arr_set_na(result_data, j)
else:
str_arr_set_na(result_data, j)
else:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < nan_array_size:
str_arr_set_na(result_data, j)
else:
str_arr_j = j - nan_array_size
if arr_is_na_mask[str_arr_j]:
str_arr_set_na(result_data, j)
return result_data
@numba.njit
def _hpat_ensure_array_capacity(new_size, arr):
""" Function ensuring that the size of numpy array is at least as specified
Returns newly allocated array of bigger size with copied elements if existing size is less than requested
"""
k = len(arr)
if k >= new_size:
return arr
n = k
while n < new_size:
n = 2 * n
res = numpy.empty(n, arr.dtype)
res[:k] = arr[:k]
return res
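# --- Illustrative sketch (not part of the original module): the capacity helper grows the
# --- buffer by repeated doubling (amortised O(1) appends). Pure-NumPy rewrite for clarity;
# --- the original above is compiled with numba.njit.
import numpy as np

def _ensure_capacity_demo(new_size, arr):
    n = len(arr)
    if n >= new_size:
        return arr
    while n < new_size:
        n *= 2
    res = np.empty(n, arr.dtype)
    res[:len(arr)] = arr
    return res

print(len(_ensure_capacity_demo(5, np.zeros(3))))  # 6: capacity 3 doubled once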
def sdc_join_series_indexes(left, right):
pass
@sdc_overload(sdc_join_series_indexes, jit_options={'parallel': False})
def sdc_join_series_indexes_overload(left, right):
"""Function for joining arrays left and right in a way similar to pandas.join 'outer' algorithm"""
# check that both operands are of types used for representing Pandas indexes
if not (isinstance(left, (types.Array, StringArrayType, RangeIndexType))
and isinstance(right, (types.Array, StringArrayType, RangeIndexType))):
return None
convert_left = isinstance(left, RangeIndexType)
convert_right = isinstance(right, RangeIndexType)
def _convert_to_arrays_impl(left, right):
_left = left.values if convert_left == True else left # noqa
_right = right.values if convert_right == True else right # noqa
return sdc_join_series_indexes(_left, _right)
if isinstance(left, RangeIndexType) and isinstance(right, RangeIndexType):
def sdc_join_range_indexes_impl(left, right):
if (left is right or numpy_like.array_equal(left, right)):
joined = left.values
lidx = numpy.arange(len(joined))
ridx = lidx
return joined, lidx, ridx
else:
return sdc_join_series_indexes(left.values, right.values)
return sdc_join_range_indexes_impl
elif isinstance(left, RangeIndexType) and isinstance(right, types.Array):
return _convert_to_arrays_impl
elif isinstance(left, types.Array) and isinstance(right, RangeIndexType):
return _convert_to_arrays_impl
# TODO: remove code duplication below and merge numeric and StringArray impls into one
# needs equivalents of numpy.arsort and _hpat_ensure_array_capacity for StringArrays
elif isinstance(left, types.Array) and isinstance(right, types.Array):
numba_common_dtype = find_common_dtype_from_numpy_dtypes([left.dtype, right.dtype], [])
if isinstance(numba_common_dtype, types.Number):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
joined = numpy.empty(est_total_size, numba_common_dtype)
left_nan = []
right_nan = []
for i in range(lsize):
if numpy.isnan(left[i]):
left_nan.append(i)
for i in range(rsize):
if numpy.isnan(right[i]):
right_nan.append(i)
# sort arrays saving the old positions
sorted_left = numpy.argsort(left, kind='mergesort')
sorted_right = numpy.argsort(right, kind='mergesort')
# put the position of the nans in an increasing sequence
sorted_left[lsize-len(left_nan):] = left_nan
sorted_right[rsize-len(right_nan):] = right_nan
i, j, k = 0, 0, 0
while (i < lsize and j < rsize):
joined = _hpat_ensure_array_capacity(k + 1, joined)
lidx = _hpat_ensure_array_capacity(k + 1, lidx)
ridx = _hpat_ensure_array_capacity(k + 1, ridx)
left_index = left[sorted_left[i]]
right_index = right[sorted_right[j]]
if (left_index < right_index) or numpy.isnan(right_index):
joined[k] = left_index
lidx[k] = sorted_left[i]
ridx[k] = -1
i += 1
k += 1
elif (left_index > right_index) or numpy.isnan(left_index):
joined[k] = right_index
lidx[k] = -1
ridx[k] = sorted_right[j]
j += 1
k += 1
else:
# find ends of sequences of equal index values in left and right
ni, nj = i, j
while (ni < lsize and left[sorted_left[ni]] == left_index):
ni += 1
while (nj < rsize and right[sorted_right[nj]] == right_index):
nj += 1
# join the blocks found into results
for s in numpy.arange(i, ni, 1):
block_size = nj - j
to_joined = numpy.repeat(left_index, block_size)
to_lidx = numpy.repeat(sorted_left[s], block_size)
to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
joined[k:k + block_size] = to_joined
lidx[k:k + block_size] = to_lidx
ridx[k:k + block_size] = to_ridx
k += block_size
i = ni
j = nj
# fill the end of joined with remaining part of left or right
if i < lsize:
block_size = lsize - i
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
ridx[k: k + block_size] = numpy.repeat(-1, block_size)
while i < lsize:
joined[k] = left[sorted_left[i]]
lidx[k] = sorted_left[i]
i += 1
k += 1
elif j < rsize:
block_size = rsize - j
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
lidx[k: k + block_size] = numpy.repeat(-1, block_size)
while j < rsize:
joined[k] = right[sorted_right[j]]
ridx[k] = sorted_right[j]
j += 1
k += 1
return joined[:k], lidx[:k], ridx[:k]
return sdc_join_series_indexes_impl
else:
return None
elif (left == string_array_type and right == string_array_type):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
# use Series.sort_values since argsort for StringArrays not implemented
original_left_series = | pandas.Series(left) | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pandas as pd
import cobra
from cobra_utils.query.met_info import classify_metabolites_by_type
def rxn_info_from_metabolites(model, metabolites, verbose=True):
'''
This function looks for all the reactions in which the metabolites in the list participate. It also retrieves the genes
associated with those reactions.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
metabolites : array-like
An iterable object containing a list of metabolite ids present in the model.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are:
'RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula', 'MetID', 'MetName'
'''
if verbose:
print('Using list of metabolites to get reactions where they participate. Also, getting genes of those reactions.')
rxn_gene_association = []
for metabolite in metabolites:
met = model.metabolites.get_by_id(metabolite)
for rxn in met.reactions:
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append(
(rxn.id, rxn.name, str(gene.id), rxn.subsystem, rxn.reaction, met.id, met.name))
else:
rxn_gene_association.append(
(rxn.id, rxn.name, '', rxn.subsystem, rxn.reaction, met.id, met.name))
labels = ['RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula', 'MetID', 'MetName']
rxn_gene_association = | pd.DataFrame.from_records(rxn_gene_association, columns=labels) | pandas.DataFrame.from_records |
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
## download the matching file
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
## pick the matching file to read
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')
elif self.indicador == 'edad':
print('reading files')
print('vacunacion_edad')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv')
elif self.indicador == 'caracteristicas_del_vacunado':
print('reading files')
print('vacunacion_grupo')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv')
elif self.indicador == 'vacunas_region':
print('reading files')
print('vacunacion por region por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna':
print('reading files')
print('vacunacion por comuna por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_region':
print('reading files')
print('vacunacion por region por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_sexo':
print('reading files')
print('vacunacion por sexo por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
print('vacunacion por sexo por edad y FECHA')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1')
self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_prioridad':
print('reading files')
print('vacunacion por grupos prioritarios')
self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1')
# aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1')
# self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna_edad':
print('reading files')
print('vacunacion por comuna por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_establecimiento':
print('reading files')
print('vacunacion por establecimiento')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante':
print('reading files')
print('vacunacion por fabricante y fecha')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante_edad':
print('reading files')
print('vacunacion por fabricante y edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
def last_to_csv(self):
if self.indicador == 'fabricante':
## campaign by manufacturer
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
self.last_added.rename(columns={'Type': 'Fabricante'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Fabricante', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'campana':
## campaign by region
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
utils.regionName(self.last_added)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'edad':
## campaign by age
self.last_added.rename(columns={'Dose': 'Dosis',
'Age':'Rango_etario'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Rango_etario', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'caracteristicas_del_vacunado':
## campaign by characteristics of the vaccinated person
self.last_added.rename(columns={'Dose': 'Dosis',
'Group':'Grupo'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Grupo', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'vacunas_region':
self.last_added.rename(columns={'REGION_CORTO': 'Region',
'COD_COMUNA_FINAL': 'Comuna',
'FECHA_INMUNIZACION': 'Fecha',
'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna',
'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna',
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna',
'SUM_of_4_Dosis':'Cuarta_comuna',
'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True)
self.last_added = self.last_added.dropna(subset=['Fecha'])
self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_added.sort_values(by=['Region','Fecha'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
# transform
## aggregate over comunas
self.last_added['Primera'] = self.last_added.groupby(['Region','Fecha'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region','Fecha'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Fecha'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Fecha'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Fecha'])['Cuarta_comuna'].transform(
'sum')
self.last_added = self.last_added[['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
## fill in missing dates for each region and build the national total
idx = pd.date_range(self.last_added['Fecha'].min(), self.last_added['Fecha'].max())
df = pd.DataFrame()
total = pd.DataFrame(columns=['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta'])
total = utils.fill_in_missing_dates(total, 'Fecha', 0, idx)
total["Region"] = total["Region"].replace({0: 'Total'})
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region = utils.fill_in_missing_dates(df_region,'Fecha',0,idx)
df_region["Region"] = df_region["Region"].replace({0:region})
total['Primera'] = df_region['Primera'] + total['Primera']
total['Segunda'] = df_region['Segunda'] + total['Segunda']
total['Unica'] = df_region['Unica'] + total['Unica']
total['Refuerzo'] = df_region['Refuerzo'] + total ['Refuerzo']
total['Cuarta'] = df_region['Cuarta'] + total['Cuarta']
df = df.append(df_region, ignore_index=True)
total = total.append(df,ignore_index=True)
total['Fecha'] = total['Fecha'].dt.strftime("%Y-%m-%d")
self.last_added = total
## accumulate running totals
self.last_added['Primera'] = pd.to_numeric(self.last_added['Primera'])
self.last_added['Segunda'] = pd.to_numeric(self.last_added['Segunda'])
self.last_added['Unica'] = pd.to_numeric(self.last_added['Unica'])
self.last_added['Refuerzo'] = pd.to_numeric(self.last_added['Refuerzo'])
self.last_added['Cuarta'] = pd.to_numeric(self.last_added['Cuarta'])
self.last_added['Primera'] = self.last_added.groupby(['Region'])['Primera'].transform('cumsum')
self.last_added['Segunda'] = self.last_added.groupby(['Region'])['Segunda'].transform('cumsum')
self.last_added['Unica'] = self.last_added.groupby(['Region'])['Unica'].transform('cumsum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region'])['Refuerzo'].transform('cumsum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region'])['Cuarta'].transform('cumsum')
self.last_added['Total'] = self.last_added.sum(numeric_only=True, axis=1)
## reshape into the standard input-file format
df = pd.DataFrame()
regiones = pd.DataFrame(self.last_added['Region'].unique())
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.set_index('Fecha',inplace=True)
df_region = df_region[['Primera','Segunda','Unica','Refuerzo','Cuarta']].T
df_region.reset_index(drop=True, inplace=True)
df = df.append(df_region, ignore_index=True)
new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for region in regiones[0]:
col = [region,region,region,region,region]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Region', value=new_col)
self.last_added = df
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_edad_region':
self.last_added.rename(columns={'NOMBRE_REGION': 'Region',
'COD_COMUNA': 'Comuna',
'EDAD_ANOS': 'Edad',
'POBLACION':'Poblacion',
'2aDOSIS_RES': 'Segunda_comuna',
'1aDOSIS_RES': 'Primera_comuna',
'4aDOSIS':'Cuarta_comuna',
'Refuerzo_DOSIS':'Refuerzo_comuna',
'ÚnicaDOSIS':'Unica_comuna'}, inplace=True)
self.last_added.sort_values(by=['Region', 'Edad'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
# transform
## aggregate over comunas
self.last_added['Primera'] = self.last_added.groupby(['Region', 'Edad'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region', 'Edad'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Edad'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Edad'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Edad'])['Cuarta_comuna'].transform('sum')
self.last_added['Poblacion'] = self.last_added.groupby(['Region','Edad'])['Poblacion'].transform('sum')
self.last_added = self.last_added[['Region', 'Edad', 'Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
## build the national total
df = pd.DataFrame()
total = pd.DataFrame(columns=['Region', 'Edad','Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta'])
total['Edad'] = list(range(15, 81))
total["Region"] = total["Region"].fillna('Total')
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.reset_index(drop=True, inplace=True)
total['Primera'] = total.Primera.fillna(0) + df_region.Primera.fillna(0)
total['Segunda'] = total.Segunda.fillna(0) + df_region.Segunda.fillna(0)
total['Unica'] = total.Unica.fillna(0) + df_region.Unica.fillna(0)
total['Refuerzo'] = total.Refuerzo.fillna(0) + df_region.Refuerzo.fillna(0)
total['Cuarta'] = total.Cuarta.fillna(0) + df_region.Cuarta.fillna(0)
total['Poblacion'] = total.Poblacion.fillna(0) + df_region.Poblacion.fillna(0)
df = df.append(df_region, ignore_index=True)
total = total.append(df, ignore_index=True)
self.last_added = total
## reshape into the standard input-file format
df = pd.DataFrame()
regiones = pd.DataFrame(self.last_added['Region'].unique())
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.set_index('Edad', inplace=True)
df_region = df_region[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T
df_region.reset_index(drop=True, inplace=True)
df = df.append(df_region, ignore_index=True)
new_col = ['Primera', 'Segunda', 'Unica','Refuerzo','Cuarta','Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for region in regiones[0]:
col = [region, region,region]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Region', value=new_col)
self.last_added = df
identifiers = ['Region','Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_edad_sexo':
# By region, totals
self.last_added.rename(columns={'NOMBRE_REGION': 'Region',
'SEXO1': 'Sexo',
'EDAD_ANOS': 'Edad',
'POBLACION':'Poblacion',
'SUM_of_1aDOSIS': 'Primera',
'SUM_of_2aDOSIS': 'Segunda',
'SUM_of_ÚnicaDOSIS':'Unica',
'SUM_of_Refuerzo_DOSIS':'Refuerzo',
'SUM_of_4_Dosis':'Cuarta'}, inplace=True)
self.last_added.sort_values(by=['Sexo','Edad'], inplace=True)
self.last_added = self.last_added[['Sexo','Edad','Primera','Segunda','Unica','Refuerzo','Cuarta']]
sexo = pd.DataFrame(self.last_added['Sexo'].unique())
## build the total
df = pd.DataFrame()
for sex in sexo[0]:
total = pd.DataFrame(columns=['Sexo', 'Edad', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta'])
total['Edad'] = list(range(self.last_added.Edad.min(), self.last_added.Edad.max() + 1))
df_sex = self.last_added.loc[self.last_added['Sexo'] == sex]
df_sex.reset_index(drop=True, inplace=True)
df_sex.index = df_sex['Edad']
total.index = total['Edad']
total['Sexo'] = total.Sexo.fillna(sex)
total['Primera'] = total.Primera.fillna(0) + df_sex.Primera.fillna(0)
total['Segunda'] = total.Segunda.fillna(0) + df_sex.Segunda.fillna(0)
total['Unica'] = total.Unica.fillna(0) + df_sex.Unica.fillna(0)
total['Refuerzo'] = total.Refuerzo.fillna(0) + df_sex.Refuerzo.fillna(0)
total['Cuarta'] = total.Cuarta.fillna(0) + df_sex.Cuarta.fillna(0)
df = df.append(total, ignore_index=True)
self.last_added = df
## reshape into the standard input-file format
df = pd.DataFrame()
sexo = pd.DataFrame(self.last_added['Sexo'].unique())
for sex in sexo[0]:
df_sex = self.last_added.loc[self.last_added['Sexo'] == sex]
df_sex.set_index('Edad', inplace=True)
df_sex = df_sex[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T
df_sex.reset_index(drop=True, inplace=True)
df = df.append(df_sex, ignore_index=True)
new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for sex in sexo[0]:
col = [sex, sex,sex]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Sexo', value=new_col)
self.last_added = df
identifiers = ['Sexo','Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json', orient='values', force_ascii=False)
# By date, totals
self.last_edad_fecha.rename(columns={'FECHA_INMUNIZACION': 'Fecha',
'EDAD_ANOS': 'Edad',
'SUM_of_1aDOSIS': 'Primera',
'SUM_of_2aDOSIS': 'Segunda',
'SUM_of_SUM_of_ÚnicaDOSIS': 'Unica',
'SUM_of_Refuerzo_DOSIS':'Refuerzo',
'SUM_of_4aDOSIS':'Cuarta'}, inplace=True)
self.last_edad_fecha['Fecha'] = pd.to_datetime(self.last_edad_fecha['Fecha'], format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_edad_fecha.sort_values(by=['Fecha', 'Edad'], inplace=True)
self.last_edad_fecha.reset_index(drop=True,inplace=True)
self.last_edad_fecha.dropna(subset=['Fecha'],inplace=True)
columns_name = self.last_edad_fecha.columns.values
maxSE = self.last_edad_fecha[columns_name[0]].max()
minSE = self.last_edad_fecha[columns_name[0]].min()
#print(minSE, maxSE)
lenSE = (pd.to_datetime(maxSE) - pd.to_datetime(minSE)).days + 1
startdate = | pd.to_datetime(minSE) | pandas.to_datetime |
# import linkml
import logging
import random
import re
# urllib? urllib3? pure requests?
import urllib
import click
import click_log
import linkml.utils.rawloader as rl
import linkml_runtime
import pandas as pd
import requests
import yaml
from linkml_runtime.dumpers import yaml_dumper
# cosine? SIFT4?
from strsimpy.cosine import Cosine
# # for querying and changing?
# import linkml_runtime_api
# # PROGRESS
# # took out globals
# # reusing requests sessions
# # saving annotations
# # logging
# # TODO
# # add caching of already searched terms
# # add default values for functions
# todo this silences
# SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame
# but I should really be dealing with it
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
# make classes out of DataFrames that would be appealing as globals
# TODO try dataclasses
class DataFrameClass:
def __init__(self):
self.mapping_frame = pd.DataFrame()
def add(self, miniframe):
self.mapping_frame = self.mapping_frame.append(miniframe)
def get(self):
return self.mapping_frame
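# Illustrative usage of DataFrameClass (not part of the original script): it is a
# simple accumulator, so callers add() one chunk at a time and get() once at the end.
# The frame contents below are made up.
#   mappings = DataFrameClass()
#   mappings.add(pd.DataFrame([{"iri": "http://example.org/1", "label": "foo"}]))
#   mappings.add(pd.DataFrame([{"iri": "http://example.org/2", "label": "bar"}]))
#   combined = mappings.get()  # one DataFrame holding both rows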
def parse_yaml_file(yaml_file_name):
with open(yaml_file_name, 'r') as stream:
try:
parse_res = yaml.safe_load(stream)
return parse_res
except yaml.YAMLError as exc:
# log or print?
logger.error(exc)
def dict_to_schema(dict_param):
converted_schema = rl.load_raw_schema(dict_param)
return converted_schema
def request_an_enum(schema_param, enum_name_param):
enum_requested = schema_param.enums[enum_name_param]
return enum_requested
def request_pvs(enum_param):
pvs_requested = enum_param.permissible_values
return pvs_requested
# make sorting optional?
def get_pv_names(pv_param):
pv_names = [k for k, v in pv_param.items()]
pv_names.sort()
return pv_names
def make_cosine_obj(shingle_size_param):
made_cosine_obj = Cosine(shingle_size_param)
return made_cosine_obj
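# Hedged sketch of how the Cosine object is used further down: strsimpy's
# Cosine(k) compares k-shingle profiles, and distance() returns a value in
# [0, 1] where 0 means identical profiles. The strings below are made up.
#   cos2 = make_cosine_obj(2)
#   d = cos2.distance("mus musculus", "mus musculus domesticus")
#   assert 0.0 <= d <= 1.0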
def ols_term_search(term, chars_to_whiteout, ontology_param, qf_param, rowcount_param, blank_row_param,
global_frame_param, session_param, ols_search_base_url):
woed = do_whiteout(term, chars_to_whiteout)
request_string = ols_search_base_url + \
'?q=' + \
urllib.parse.quote(woed) + '&' + \
'type=class' + '&' + \
'exact=false' + '&' + \
ontology_param + "&" + \
'rows=' + str(rowcount_param) + '&' + \
qf_param
logger.debug(request_string)
# this gets matching terms but doesn't show why they matched
response_param = session_param.get(request_string)
ols_string_search_res_j = response_param.json()
ols_string_search_res_frame = pd.DataFrame(ols_string_search_res_j['response']['docs'])
ols_string_search_res_frame.insert(0, "raw_query", term)
ols_string_search_res_frame.insert(0, "tidied_query", woed)
# did the string search get any result rows?
r, c = ols_string_search_res_frame.shape
if r == 0:
no_search_res_dict = blank_row_param.copy()
# no_search_res_dict['id'] = term
no_search_res_dict['raw_query'] = term
no_search_res_dict['tidied_query'] = woed
no_search_res_frame = pd.DataFrame([no_search_res_dict])
ols_string_search_res_frame = ols_string_search_res_frame.append(no_search_res_frame)
# failures.append(orig_enum)
global_frame_param.add(ols_string_search_res_frame)
# return ols_string_search_res_frame?
return True
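# For reference, with the default search base URL and made-up query/ontology
# values, the request assembled above looks roughly like:
#   http://www.ebi.ac.uk/ols/api/search?q=mus%20musculus&type=class&exact=false&ontology=ncbitaxon&rows=5&queryFields=label
# i.e. the tidied term is URL-quoted and the ontology/rows/queryFields phrases
# are appended exactly as built in ols_term_search().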
# TODO refactor
def make_ontology_phrase(ontology_param):
ontologies_phrase = ''
if ontology_param is not None and ontology_param != "":
ontologies_phrase = 'ontology=' + ontology_param.lower()
return ontologies_phrase
# TODO refactor
def make_qf_phrase(qf_param):
qf_phrase = ''
if qf_param is not None and qf_param != "":
qf_phrase = 'queryFields=' + qf_param.lower()
return qf_phrase
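# Small worked examples for the two phrase builders above (argument values are arbitrary):
#   make_ontology_phrase("NCBItaxon,PATO")  -> "ontology=ncbitaxon,pato"
#   make_ontology_phrase("")                -> ""
#   make_qf_phrase("label,synonym")         -> "queryFields=label,synonym"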
def do_whiteout(raw_string, chars_to_whiteout):
if chars_to_whiteout is not None and chars_to_whiteout != "":
tidied_string = re.sub(r'[' + chars_to_whiteout + ']+', ' ', raw_string)
else:
tidied_string = raw_string
return tidied_string
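# Example of the whiteout step with the default whiteout characters '._-'
# (the input strings are hypothetical):
#   do_whiteout("Mus_musculus-BALB.c", "._-")  -> "Mus musculus BALB c"
#   do_whiteout("Mus musculus", "")            -> "Mus musculus"  (unchanged)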
def get_ols_term_annotations(iri_param, ontology_param, session_param, ols_terms_based_url, term_annotations):
logger.info(iri_param)
once = urllib.parse.quote(iri_param, safe='')
twice = urllib.parse.quote(once, safe='')
# build url from base
term_retr_assembled = ols_terms_based_url + ontology_param + '/terms/' + twice
term_details = session_param.get(term_retr_assembled)
term_json = term_details.json()
if 'label' in set(term_json.keys()):
# logger.debug(term_retr_assembled)
# term_label = term_json['label']
# logger.debug(term_label)
label_frame = pd.DataFrame([[term_json['label'], 'label', 'label', '']],
columns=['name', 'scope', 'type', 'xrefs'])
label_frame['obo_id'] = term_json['obo_id']
label_frame['pref_lab'] = term_json['label']
label_frame.insert(0, "iri", iri_param)
term_annotations.add(label_frame)
if 'obo_synonym' in set(term_json.keys()):
obo_syn_json = term_json['obo_synonym']
obo_syn_frame = pd.DataFrame(obo_syn_json)
obo_syn_frame['obo_id'] = term_json['obo_id']
obo_syn_frame['pref_lab'] = term_json['label']
obo_syn_frame.insert(0, "iri", iri_param)
term_annotations.add(obo_syn_frame)
return True
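# Note on the double URL-encoding above: the IRI is percent-encoded twice before
# being appended to the terms endpoint, so with a hypothetical NCBITaxon IRI the
# assembled request looks roughly like:
#   http://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_10090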
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option('--modelfile', help="path to LinkML input", type=click.Path(exists=True), show_default=True,
required=True)
# parametrize this so this script can be run many times and not overwrite the all-mappings file
@click.option('--all_mappings_fn', default="target/all_mappings_frame.tsv",
help="where do you want to write a table of all mappings?",
type=click.Path(), show_default=True)
# binomial_name_enum
@click.option('--requested_enum_name', help="name of the enumeration that contains the terms you want to map",
required=True)
@click.option('--overwrite_meaning/--no_overwrite', help="do you want to overwrite meanings that are already present?",
default=True, show_default=True)
@click.option('--whiteout_chars', help="characters in terms that should be replaced with whitespace before mapping",
default='._-', show_default=True)
# ontology_string = "NCBItaxon,PATO"
@click.option('--ontology_string', help="comma separated list of ontologies to use in the mapping", default='NCBItaxon',
show_default=True)
@click.option('--ols_search_base_url', help="", default='http://www.ebi.ac.uk/ols/api/search', show_default=True)
@click.option('--ols_terms_based_url', help="", default='http://www.ebi.ac.uk/ols/api/ontologies/',
show_default=True)
@click.option('--desired_row_count', help="how many rows of mappings do you want to retrieve for each term?", default=5,
show_default=True)
@click.option('--shingle_size', help="what shingle/n-gram size do you want for cosine similarity calculations?",
default=2, show_default=True)
@click.option('--max_cosine',
help="""how much of a cosine distance will you tolerate
              when comparing an enum name to a term label or synonym?""",
default=0.05, show_default=True)
@click.option('--query_field_string',
help="""do you want to define a custom list of fields to search in?
The default settings work well in most cases.""",
default='', show_default=True)
@click.option('--test_sample_size',
              help="""if greater than 0, the enum name list will be sampled down to this size before mapping.""",
default=0, show_default=True)
def enum_annotator(modelfile, all_mappings_fn, requested_enum_name, whiteout_chars, ontology_string,
ols_search_base_url, ols_terms_based_url, desired_row_count, shingle_size, max_cosine,
overwrite_meaning, query_field_string, test_sample_size):
# show entire width of data frames
pd.set_option('display.expand_frame_repr', False)
# GLOBALS within this method
blank_row = {'title': '', 'id': '', 'iri': '', 'is_defining_ontology': '',
'label': '', 'obo_id': '', 'ontology_name': '', 'ontology_prefix': '',
'short_form': '', 'type': ''}
# ols_annotations_cols = ['name', 'obo_id', 'scope', 'type', 'xrefs']
parsed_yaml = parse_yaml_file(modelfile)
current_schema = dict_to_schema(parsed_yaml)
requested_enum_obj = request_an_enum(current_schema, requested_enum_name)
requested_pvs_obj = request_pvs(requested_enum_obj)
requested_pvs_names = get_pv_names(requested_pvs_obj)
requested_pvs_names.sort()
logger.debug(requested_pvs_names)
    ontologies_phrased = make_ontology_phrase(ontology_string)
logger.debug(ontologies_phrased)
qf_phrased = make_qf_phrase(query_field_string)
logger.debug(qf_phrased)
cosine_obj = make_cosine_obj(shingle_size)
# logger.debug(cosine_obj)
# initialize
enum_name_mappings = DataFrameClass()
# logger.debug(enum_name_mappings.get())
term_annotations = DataFrameClass()
# logger.debug(term_annotations.get())
reusable_session = requests.Session()
# logger.debug(reusable_session)
# # load with schemaview and extract SchemaDefinition?
# # or open directly into a SchemaDefinition? HOW?
# # https://fedingo.com/how-to-read-yaml-file-to-dict-in-python/
if test_sample_size > 0:
requested_pvs_names = random.sample(requested_pvs_names, test_sample_size)
requested_pvs_names.sort()
for pv_name in requested_pvs_names:
logger.info(pv_name)
# current_pv = requested_pvs_obj[pv_name]
# logger.debug(current_pv)
ols_term_search(pv_name, whiteout_chars, ontologies_phrased, qf_phrased, desired_row_count,
blank_row, enum_name_mappings, reusable_session, ols_search_base_url)
# # returns true
# # could look at growth in enum_name_mappings
enum_name_mapping_frame = enum_name_mappings.get()
# logger.debug(enum_name_mapping_frame)
term_and_source = enum_name_mapping_frame.loc[enum_name_mapping_frame['raw_query'].eq(pv_name)]
term_and_source = term_and_source[["iri", "ontology_name"]]
term_and_source.drop_duplicates(inplace=True)
term_and_source = term_and_source.loc[~term_and_source['iri'].eq("")]
term_and_source.sort_values(["iri", "ontology_name"], inplace=True)
logger.debug(term_and_source)
term_and_source = term_and_source.to_dict(orient="records")
for i in term_and_source:
# returns true and saves to a dataframe class
get_ols_term_annotations(i["iri"], i["ontology_name"], reusable_session,
ols_terms_based_url, term_annotations)
annotations_from_terms = term_annotations.get()
raw_through_annotations = enum_name_mapping_frame.merge(annotations_from_terms, how='left', on="iri",
suffixes=('_term', '_ano'))
for_str_dist = raw_through_annotations[["tidied_query", "name"]]
# 20211215 0912
# A value is trying to be set on a copy of a slice from a DataFrame.
for_str_dist["tidied_query_lc"] = for_str_dist["tidied_query"].str.lower()
for_str_dist["name_lc"] = for_str_dist["name"].str.lower()
logger.debug(for_str_dist)
# favoring simplicity over efficiency
# ie may be string-comparing some duplicates
# easier to merge back in
# for_str_dist = for_str_dist.loc[
# ~for_str_dist["tidied_query"].eq("") and ~for_str_dist["label"].eq("") and ~for_str_dist[
# "tidied_query"].isnull() and ~for_str_dist["label"].isnull()]
# for_str_dist.drop_duplicates(inplace=True)
# for_str_dist.sort_values(["tidied_query", "label"], inplace=True)
for_str_dist_dict = for_str_dist.to_dict(orient="records")
# dist_list = []
new_pair_list = []
for pair in for_str_dist_dict:
# used to get_profile
name_type = type(pair["name"])
if name_type is str:
the_dist = cosine_obj.distance(pair["tidied_query_lc"], pair["name_lc"])
pair['cosine'] = the_dist
else:
pair['cosine'] = None
new_pair_list.append(pair)
for_str_dist = pd.DataFrame(new_pair_list)
for_str_dist.drop(labels=["tidied_query_lc", "name_lc"], axis=1, inplace=True)
# was debug
logger.debug(for_str_dist)
raw_through_dist = raw_through_annotations.merge(for_str_dist, how="left", on=["tidied_query", "name"])
all_mappings_frame = []
new_enum = linkml_runtime.linkml_model.EnumDefinition(name=requested_enum_name)
# logger.info(new_enum)
# looping inside the same loop ?!
for i in requested_pvs_names:
# todo unnest loop?
logger.debug(i)
ce = requested_pvs_obj[i]
cr = raw_through_dist.loc[raw_through_dist["raw_query"].eq(i)]
all_mappings_frame.append(cr)
cr_row_count = len(cr.index)
if cr_row_count > 0:
min_cosine = cr["cosine"].min()
with_min = cr.loc[cr["cosine"] == min_cosine]
with_min_row_count = len(with_min.index)
if with_min_row_count > 0:
with_min = with_min.drop(labels=['xrefs'], axis=1)
if 'description' in with_min.columns:
with_min['description'] = str(with_min['description'])
with_min.drop_duplicates(inplace=True)
deduped_row_count = len(with_min.index)
# # I'm surprised that there aren't any 2+ equally good mappings here
# will have to deal with that at some point
# may still need to do some row filtering/prioritizing by source or annotation type
# prefer label over synonym
# Prefer ontologies in the order they appear in XXX parameter?
if deduped_row_count > 1:
pass
first_row_as_dict = (with_min.to_dict(orient="records"))[0]
ce.annotations["match_val"] = first_row_as_dict['name']
ce.annotations["match_type"] = first_row_as_dict['scope']
ce.annotations["cosine"] = first_row_as_dict['cosine']
if overwrite_meaning:
if first_row_as_dict['cosine'] <= max_cosine:
ce.meaning = first_row_as_dict['obo_id_term']
ce.title = first_row_as_dict['label']
else:
ce.meaning = None
ce.title = None
new_enum.permissible_values[i] = ce
all_mappings_frame = | pd.concat(all_mappings_frame) | pandas.concat |
"""Argument parser for obtaining WR distances"""
import argparse
import wrdists.collated_functions as cf
import pandas as pd
def main():
"""Incorporate into function to be run using the command line when installed."""
parser = argparse.ArgumentParser()
"""Set Pandas dataframes to show all elements"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
"""Necessary arguments to calculate distances"""
parser.add_argument('-p', help='Gaia parallax (mas) when in single mode (float). Column number containing parallax \
list when in file mode (int).', action='store', dest='par', type=float, required=True)
parser.add_argument('-pe', help='Gaia parallax error (mas) when in single mode (float). Column number containing \
parallax error list when in file mode (int).', action='store', dest='parerr',
type=float, required=True)
parser.add_argument('-g', help='Gaia G band magnitude when in single mode (float). Column number containing G mag list \
when in file mode (int).', action='store', dest='g', type=float, required=True)
parser.add_argument('-ra', help='Gaia right ascension (RA) when in single mode (float). Column number containing RA \
list when in file mode (int).', action='store', dest='ra', type=float, required=True)
parser.add_argument('-dec', help='Gaia declination (DEC) when in single mode (float). Column number containing DEC \
list when in file mode (int).', action='store', dest='dec', type=float, required=True)
parser.add_argument('-ast', help='Gaia astrometric excess noise when in single mode (float). Column number containing \
excess noise list when in file mode (int).', action='store', dest='ast', type=float,
required=True)
parser.add_argument('-n', help='Star name or identifier (e.g WR1) when in single mode (str). Column number containing \
list of names when in file mode (int).', action='store', dest='name', type=str,
required=True)
"""Optional arguments"""
# Load in a list of results:
parser.add_argument('-fin', help='File path from which to load a file containing parameters when executing for \
lists of stars (str).', action='store', dest='list_mode_fin', type=str, default=False)
    parser.add_argument('-fout', help='File path to store output to when executing with a file input (str).',
action='store', dest='list_mode_fout', type=str, default=False)
parser.add_argument('-ph', help='Preserve the header if the file input contains one (no argument)',
action='store_true', dest='header', default=False)
parser.add_argument('-dmt', help='Specify a delimiter for the input file (str).', action='store', dest='delimit', type=str,
default=',')
parser.add_argument('-zpt_list', help='Specify the column number containing the zero points (if used) (int).', action='store', dest='zpt_list', type=int,
default=False)
# Other options:
parser.add_argument('-zpt', help='Set the zero point of the parallaxes (mas) to an alternative value \
(default = -0.029 mas) (float).', action='store', default=-0.029, type=float, dest='zpt')
parser.add_argument('-md','--minimum_dist', help='Set the minimum distance of the prior (pc), which is useful for \
constraining the prior (float).', action='store', default=300, type=float,
dest='md')
parser.add_argument('-es','--error_sigma', help='Set the credible interval coverage range (float).', action='store',
default=0.68, type=float, dest='esig')
# Save plots and/or posterior distribution:
parser.add_argument('-pt', '--plot', help='Plot the output distributions of the prior, likelihood and posterior, \
along with the credible intervals (uncertainty bounds) and most likely \
distance (default = False). The input string should be the path to save \
the plotted image(s) (str).', action='store', default=False, type=str,
dest='plot_data')
parser.add_argument('-dist', '--distribution', help='Saves the posterior distance distribution as a csv '
'which can be loaded and used in another python program. The '
'input string should be the path to save the distribution data (str).',
action='store', default=False, type=str, dest='save_distribution')
# Exclude dust distribution or parallax resizing:
parser.add_argument('-ed','--exclude_dust', help='Exclude dust from the prior (use HII regions only), which may be \
useful to compare the effects of different priors (default = False) (no argument).',
action='store_true', default=False, dest='dust_exc')
parser.add_argument('-ee','--exclude_err', help='Exclude resizing of parallax errors (compared to external catalogues, \
Arenou et al. 2018) and zero point correction. May be useful for data \
comparison or application to non Gaia parallaxes (e.g Hipparcos) \
(default = False) (no argument)', action='store_true', default=False,
dest='err_exc')
"""Run the code to get distances"""
args = parser.parse_args()
if args.list_mode_fin:
args.par = int(args.par)
args.parerr = int(args.parerr)
args.g = int(args.g)
args.ra = int(args.ra)
args.dec = int(args.dec)
args.ast = int(args.ast)
args.name = int(args.name)
# Convert params to integer types.
if args.header:
data = pd.read_csv(args.list_mode_fin, delimiter=args.delimit)
else:
data = | pd.read_csv(args.list_mode_fin, header=None, delimiter=args.delimit) | pandas.read_csv |
import numpy as np
import pandas as pd
import data_helpers
import config
import os
import random
from numpy.random import seed
seed(123)
# Load sentences and output_layers
# ==============================================================================================================
# input_dir = '/Volumes/Maxtor/runs_cluster/lstm7/'
# path_to_dir = '/Users/danielmlow/Dropbox/cnn/lstm/runs_cluster/18-03-07-22-48/'
# Parameters
'''
Here you can specify which model/s you want to use. Just set
models=['lstm0'] #for 1 model
models=['lstm0', 'cnn4', etc. ] for several models
['lstm0', 'lstm1', 'lstm2', 'lstm3', 'lstm4', 'lstm5', 'lstm6', 'lstm7', 'cnn0', 'cnn1', 'cnn2', 'cnn3']
'''
# models = ['lstm8','cnn7']
input_dir = config.save_to
output_dir = config.save_to
# models = ['lstm7', 'cnn0']
sequence_length = config.sequence_length
# categories = config.categories
evaluate_model = False
if evaluate_model:
statistics = ['spearman', 'pearson']
models = ['cnn21']
n_random_sentences=20 #amount of sentences used to correlate between layers.
top_n_prototypical_sentences = 300 #will be taking 10 random sentences from this n. 500: max lose 15% prototypicality.
# importlib.reload(data_helpers)
# input_dir = '/Volumes/Maxtor/runs_cluster/'
# model_to_use = 'lstm0'
# output_dir = input_dir+model_to_use+'/'
# Xtrain = list(np.load(input_dir+'Xtrain.npz')['a'])# Load Xtrain in order to encode sentences # np.savez_compressed(input_dir+'Xtrain',a=Xtrain)
# Find prototypical sentences
# ==============================================================================================================
# turn Xvalidation into dataframe of all sentences
# sentences = pd.DataFrame(np.zeros((1000,130)))
#
# for x in range(130):
# for y in range(1000):
# sentences.iloc[y,x] = Xvalidation[x*1000+y]
#
# np.savez_compressed(path_to_dir+'sentences_df', sentences)
# Find prototypical sentences of categories: the ones that, on average, correlate the most with other sentences of their category.
def prototypical_sentences(statistic, Xvalidation, Xvalidation_raw, output_dir, layer=None, validation_size = None, amount_sent = None, nan='with_zeros_', categories=None):
print('finding prototypical sentences...')
# takes about 10 minutes
df_prototypical = pd.DataFrame(np.zeros(shape=(amount_sent,1)))
df_prototypical_score = pd.DataFrame(np.zeros(shape=(amount_sent,1)))
i=0
for cat_start in range(0,int(len(categories)*amount_sent),amount_sent): #eg, 960 sentences at a time, sentence for 1 category
# corrcoef = pd.DataFrame(np.corrcoef(layer.iloc[cat_start:cat_start + amount_sent, :]))
corrcoef = layer.iloc[cat_start:cat_start+amount_sent].T.corr(method=statistic) #correlate each sentence feat vector with eachother for a single category
        mean_corrcoef = corrcoef.mean(axis=0)  # axis=0 is correct with respect to the true sentence IDs
sorted_means = mean_corrcoef.sort_values(ascending=False)
sorted_index = pd.DataFrame(sorted_means.index, columns=[categories[i]])
df_prototypical = pd.concat([df_prototypical,sorted_index], axis=1)
sorted_means_index_reset = pd.DataFrame(sorted_means, columns=[categories[i]]).reset_index(drop=True)
df_prototypical_score = pd.concat([df_prototypical_score, sorted_means_index_reset], axis=1)
i += 1
df_prototypical = df_prototypical.iloc[:,1:] #The index of the highest correlating sentences
df_prototypical_score = df_prototypical_score.iloc[:,1:] #The average correlation for that sentence with other sentences of its category
# turn indexes into sentences: TAKES A WHILE
print('making df_prototypical_sentences.csv ...')
df_prototypical_sentences = pd.DataFrame(np.zeros((amount_sent, len(categories))),columns=categories)
i = 0
for column in range(len(categories)):
for row in range(amount_sent):
df_prototypical_sentences.iloc[row, column] = Xvalidation[int(df_prototypical.iloc[row, column])]
i += 1
df_prototypical_sentences_raw = pd.DataFrame(np.zeros((amount_sent, len(categories))),columns=categories)
for column in range(len(categories)):
for row in range(amount_sent):
df_prototypical_sentences_raw.iloc[row, column] = Xvalidation_raw[df_prototypical.iloc[row, column]]
df_prototypical[categories].to_csv(output_dir + 'df_prototypical_'+str(nan)+statistic+'.csv', index=False, header=True, columns=categories)
df_prototypical_score[categories].to_csv(output_dir + 'df_prototypical_score_'+str(nan)+statistic+'.csv', index=False, header=True, columns=categories)
df_prototypical_sentences[categories].to_csv(output_dir + 'df_prototypical_sentences_'+str(nan)+statistic+'.csv', index=False, encoding='utf-8-sig', header=True, columns=categories)
df_prototypical_sentences_raw[categories].to_csv(
output_dir + 'df_prototypical_sentences_raw_' + str(nan) + statistic + '.csv', index=False, encoding='utf-8-sig',
header=True, columns=categories)
np.savez_compressed(output_dir + 'sentences_prototypical_'+str(nan)+statistic, a=df_prototypical[categories], b=df_prototypical_score[categories], c=df_prototypical_sentences[categories])
return df_prototypical[categories], df_prototypical_score[categories], df_prototypical_sentences[categories]
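# Minimal sketch of the idea behind prototypical_sentences(), on a toy category of
# three sentences (the feature vectors are made up): sentences are ranked by their
# mean correlation with every other sentence of the same category.
#   toy = pd.DataFrame([[1.0, 0.0, 1.0],
#                       [0.9, 0.1, 1.0],
#                       [0.0, 1.0, 0.0]])
#   mean_corr = toy.T.corr(method='spearman').mean(axis=0)
#   mean_corr.sort_values(ascending=False)  # rows 0 and 1 rank above row 2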
# Find prototypical sentences
# ============================================================================================================
# You want to stack X (e.g., 10) sentences in one column
def random_n_prototypical_sentences(n, df_prototypical, df_prototypical_sentences, num_categories=None, layer=None, categories=None):
prototypical_random10_layerX = pd.DataFrame()
for i in range(num_categories):
# Take top 200 prototypical sentences (20% for Xvalidation or Xtest)
prototypical_sentences_1cat = df_prototypical_sentences.iloc[:top_n_prototypical_sentences,i]
# Insert column with sentence len
len_sent1 = []
for sent in prototypical_sentences_1cat:
len_sent = len(sent.split(' '))
len_sent1.append(len_sent)
prototypical_sentences_1cat_len = pd.concat([ | pd.DataFrame(prototypical_sentences_1cat) | pandas.DataFrame |
from sklearn.linear_model import Ridge
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn import linear_model
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
train_data = pd.read_csv('reg_train.csv')
test_data = | pd.read_csv('reg_test.csv') | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
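# Illustrative behaviour of the helper above (DataFrame outranks Series, which
# outranks Index); the arguments are arbitrary:
#   get_upcast_box(pd.Index, pd.Series([1]))            -> pd.Series
#   get_upcast_box(pd.Series, pd.DataFrame({"a": [1]})) -> pd.DataFrame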
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
| tm.assert_series_equal(resultb, df['A']) | pandas.util.testing.assert_series_equal |
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
import pandas as pd
import pytest
import shap
import sklearn
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from responsibleai import RAIInsights
@pytest.fixture(scope='session')
def create_rai_insights_object_classification():
X, y = shap.datasets.adult()
y = [1 if r else 0 for r in y]
X, y = sklearn.utils.resample(
X, y, n_samples=1000, random_state=7, stratify=y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.01, random_state=7, stratify=y)
knn = sklearn.neighbors.KNeighborsClassifier()
knn.fit(X_train, y_train)
X['Income'] = y
X_test['Income'] = y_test
ri = RAIInsights(knn, X, X_test, 'Income', 'classification',
categorical_features=['Workclass', 'Education-Num',
'Marital Status',
'Occupation', 'Relationship',
'Race',
'Sex', 'Country'])
ri.explainer.add()
ri.counterfactual.add(10, desired_class='opposite')
ri.error_analysis.add()
ri.causal.add(treatment_features=['Hours per week', 'Occupation'],
heterogeneity_features=None,
upper_bound_on_cat_expansion=42,
skip_cat_limit_checks=True)
ri.compute()
return ri
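# Sketch of how the session-scoped fixture above is consumed; the test name and
# assertion are hypothetical:
#   def test_classification_insights(create_rai_insights_object_classification):
#       ri = create_rai_insights_object_classification
#       assert ri.task_type == 'classification'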
@pytest.fixture(scope='session')
def create_rai_insights_object_regression():
housing = fetch_california_housing()
X_train, X_test, y_train, y_test = train_test_split(housing.data,
housing.target,
test_size=0.005,
random_state=7)
X_train = pd.DataFrame(X_train, columns=housing.feature_names)
X_test = | pd.DataFrame(X_test, columns=housing.feature_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import jsonRW as jsRW
def numToCat(row):
if row['userCritic_difference'] >= 30:
return 'high'
elif row['userCritic_difference'] >= 20:
return 'moderate'
else:
return 'low'
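# Quick examples of the bucketing above (the rows are hypothetical):
#   numToCat({'userCritic_difference': 35}) -> 'high'
#   numToCat({'userCritic_difference': 25}) -> 'moderate'
#   numToCat({'userCritic_difference': 5})  -> 'low'
# It is presumably applied with df.apply(numToCat, axis=1) once the difference column exists.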
metacritic_list = jsRW.readJson('updated_metacritic2019_data')
metacritic_dict = {'title':[], 'user_rating':[], 'critic_rating':[]}
for elem in metacritic_list:
metacritic_dict['title'].append(elem['title'])
metacritic_dict['user_rating'].append((elem['user_rating']))
metacritic_dict['critic_rating'].append((elem['critic_rating']))
df = pd.DataFrame(metacritic_dict)
tbdIndex = (df[df['user_rating']=="tbd"].index)
df = df.drop(labels=tbdIndex, axis='index')
df['user_rating'] = pd.to_numeric(df['user_rating'])
df['critic_rating'] = | pd.to_numeric(df['critic_rating']) | pandas.to_numeric |
import gitlab
import dateutil.parser
import reader.cache
import hashlib
import logging
from pandas import DataFrame, NaT
from datetime import datetime
class Gitlab:
def __init__(self, gitlab_config: dict, workflow: dict):
self.gitlab_config = gitlab_config
        self.workflow = workflow
        # Initialise the cache here so get_data() can rely on self.cache existing.
        self.cache = reader.cache.Cache(self.cache_name())
def cache_name(self):
token = self.gitlab_config["token"]
workflow = str(self.workflow)
url = self.gitlab_config["url"]
project_id = (
self.gitlab_config.get("project_id")
if self.gitlab_config.get("project_id")
else self.gitlab_config.get("group_id")
)
name_hashed = hashlib.md5(
(token + url + workflow + str(project_id)).encode("utf-8")
)
return name_hashed.hexdigest()
def get_gitlab_instance(self):
gl = gitlab.Gitlab(
self.gitlab_config["url"], private_token=self.gitlab_config["token"]
)
gl.auth()
return gl
def get_issue_data(self, issue):
issue_data = {
"Key": issue.id,
"Type": "issue",
"Creator": issue.author["name"],
"Created": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),
"Done": (
                # closed_at marks when the issue was finished; open issues stay NaT
                dateutil.parser.parse(issue.closed_at).replace(tzinfo=None)
                if issue.closed_at
                else NaT
),
}
return issue_data
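    # For reference, get_issue_data() returns one flat dict per issue, e.g.
    # (values are made up):
    #   {"Key": 123, "Type": "issue", "Creator": "Jane Doe",
    #    "Created": datetime(2021, 1, 4, 9, 30), "Done": NaT}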
def get_issues(self):
gl = self.get_gitlab_instance()
if self.gitlab_config.get("project_id"):
project = gl.projects.get(self.gitlab_config["project_id"])
issues = project.issues.list()
elif self.gitlab_config.get("group_id"):
group = gl.groups.get(self.gitlab_config["group_id"])
issues = group.issues.list()
else:
raise Exception("No valid project_id or group_id found!")
return issues
def get_data(self) -> DataFrame:
if self.gitlab_config["cache"] and self.cache.is_valid():
logging.debug("Getting gitlab data from cache")
df_issue_data = self.cache.read()
return df_issue_data
issues = self.get_issues()
# issue_data = {"Key": [], "Type": [], "Creator": [], "Created": [], "Done": []}
issues_data = [self.get_issue_data(issue) for issue in issues]
df_issues_data = | DataFrame(issues_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
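    # For reference, the fixture index built in setup_method holds six tuples drawn
    # from the 4 x 2 level set:
    #   [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
    #    ('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
    # with names ['first', 'second'].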
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels with a different number of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't error on scalar data; instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't error on scalar data; instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't error on scalar data; instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and .values is still the same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least) shallow copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# It doesn't matter which way labels are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# It doesn't matter which way names are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3)
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is cast to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
#################################################################
# <NAME>, PhD - Imperial College London, 15/09/2020 #
#################################################################
import os
import numpy as np
from numpy import array
import pandas as pd
import math
import glob
#from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
# Theta in degrees (Optional)
def cart2cylc(x, y, z):
r = np.sqrt(np.power(x,2)+np.power(y,2))
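# Note: math.atan(y/x) only resolves angles in (-90, 90) degrees and fails when
# x == 0; math.atan2(y, x) would cover all quadrants if that ever matters here.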
t = math.atan(y/x)*(180/math.pi)
z = z
coord = [r,t,z]
return coord
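# Orthonormal change of basis: u points along the (1, 1, 1) direction, while v and w
# span the plane orthogonal to it. Presumably used to re-express the mesh coordinates
# before the per-point quantities (Sradial/Scirc/Slong) are accumulated below.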
def unitvar(x, y, z):
u = (1/math.sqrt(3))*(x+y+z)
v = (1/math.sqrt(6))*(x+y-(2*z))
w = (1/math.sqrt(2))*(x-y)
coord = [u, v, w]
return coord
path_data = "/mnt/storage/home/mthanaj/cardiac/UKBB_40616/UKBB_test/4DSegment2.0_test_motion_final"
folder = os.listdir(path_data)
for iP in range(0,10):
file = os.path.join(os.path.join(path_data,folder[iP],"motion"))
os.chdir(file)
txt_files = array(glob.glob("*.txt"))
files = txt_files[0:100]
ir = 1
npo = 50656
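# Pre-allocate one value per surface point (npo) for each of the 50 motion frames;
# the names suggest radial, circumferential and longitudinal strain components.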
Sradial = np.zeros((npo,50))
Scirc = np.zeros((npo,50))
Slong = np.zeros((npo,50))
for iF in range(0,50):
# Step 1 - Call epi and endo, project orthogonally onto the unit variable and bind
os.chdir(file)
print(file)
EDendo = pd.read_csv(files[0,], sep=" ", header=None)
EDendo.columns = ["x", "y", "z"]
EDepi = pd.read_csv(files[50,], sep=" ", header=None)
EDepi.columns = ["x", "y", "z"]
EDepi_data = array(unitvar(EDepi.iloc[:,0],EDepi.iloc[:,1],EDepi.iloc[:,2])).T
EDendo_data = array(unitvar(EDendo.iloc[:,0],EDendo.iloc[:,1],EDendo.iloc[:,2])).T
ED_data = np.concatenate([EDepi_data, EDendo_data], axis=0)
ED_data = pd.DataFrame(ED_data, columns=["x", "y", "z"])
import unittest
import pandas as pd
import pandas.util.testing as pt
import tia.util.fmt as fmt
def tof(astr):
return float(astr.replace(",", ""))
class TestFormat(unittest.TestCase):
def ae(self, expected, fct, value, **kwargs):
cb = fct(**kwargs)
actual = cb(value)
self.assertEqual(expected, actual)
def test_default_formats(self):
B = float("-1,250,500,880.76".replace(",", ""))
M = B / 1000.0
k = M / 1000.0
p = k / 1000000.0
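# The same magnitude is expressed at billion (B), million (M), thousand (k) and
# fractional (p) scale so each formatter's scaling and sign handling can be checked.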
tests = [
(B, "$(1.3B)", fmt.BillionDollarsFormatter),
(B, "(1.3B)", fmt.BillionsFormatter),
(M, "$(1.3M)", fmt.MillionDollarsFormatter),
(M, "(1.3M)", fmt.MillionsFormatter),
(k, "$(1.3k)", fmt.ThousandDollarsFormatter),
(k, "(1.3k)", fmt.ThousandsFormatter),
(k, "(1,250.50)", fmt.FloatFormatter),
(k, "(1,251)", fmt.IntFormatter),
# Floats
(k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)),
(k, "-1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1,251", fmt.new_int_formatter(commas=1)),
(str(k), "-1,251", fmt.new_int_formatter(commas=1, coerce=True, parens=0)),
# Ints
(k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)),
(k, "-1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1,251", fmt.new_int_formatter(commas=1)),
# Percents
(0.12433, "12.4%", fmt.new_percent_formatter(commas=1, precision=1)),
(0.12433, "12.433%", fmt.new_percent_formatter(commas=1, precision=3)),
(
-0.12433,
"-12.4%",
fmt.new_percent_formatter(commas=1, parens=0, precision=1),
),
(
-0.12433,
"(12.4%)",
fmt.new_percent_formatter(commas=1, parens=1, precision=1),
),
]
for val, expected, fct in tests:
actual = fct(val)
self.assertEqual(expected, actual)
# Test if it were a list
actual = fct([val] * 5)
self.assertEqual([expected] * 5, actual)
# Test if it were a series
actual = fct(pd.Series([val] * 5))
pt.assert_series_equal(pd.Series([expected] * 5), actual)
# Test if it were a DataFrame
actual = fct(pd.DataFrame({"a": [val] * 5, "b": [val] * 5}))
pt.assert_frame_equal(
pd.DataFrame({"a": [expected] * 5, "b": [expected] * 5}), actual
)
def test_fmt_datetime(self):
self.assertEqual(
fmt.new_datetime_formatter("%Y-%m")(pd.to_datetime("1/1/2013")), "2013-01"
)
def test_guess_formatter(self):
for n, t in (3, "k"), (6, "M"), (9, "B"):
m = 10 ** n
s = pd.Series([2.1 * m, -20.1 * m, 200.1 * m])
actual = fmt.guess_formatter(s, precision=1)(s)
expected = pd.Series(["2.1" + t, "(20.1%s)" % t, "200.1" + t])
pt.assert_series_equal(expected, actual)
# percents
s = pd.Series([0.024, -0.561, 0.987])
actual = fmt.guess_formatter(s, precision=1, pcts=1)(s)
expected = pd.Series(["2.4%", "(56.1%)", "98.7%"])
pt.assert_series_equal(expected, actual)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs_mixed_dtypes(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame(
[[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]
)
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame(
[[1, 3, 5]] + [[10, 11, 12]] * (n + 1),
columns=["a", "b", "c"],
dtype="object",
)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
| tm.assert_frame_equal(df, exp) | pandas._testing.assert_frame_equal |
import os, unittest, pandas as pd, numpy as np
from saspt.trajectory_group import TrajectoryGroup
from saspt.constants import TRACK, FRAME, PY, PX, TRACK_LENGTH, JUMPS_PER_TRACK, DFRAMES, DR2, DY, DX, RBME
from saspt.utils import track_length
from saspt.io import is_detections
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES = os.path.join(TEST_DIR, "fixtures")
class TestTrajectoryGroup(unittest.TestCase):
def setUp(self):
# Simple set of trajectories
self.sample_detections = pd.DataFrame({
TRACK: [ 0, 1, 1, -1, 3, 3, 3, 4, 4],
FRAME: [ 0, 0, 1, 1, 1, 2, 3, 6, 7],
PY: [ 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
PX: [ 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]
})
# More complex set of trajectories
self.track_csv = os.path.join(FIXTURES, "sample_tracks.csv")
# Sample set of TrajectoryGroup initialization kwargs
self.init_kwargs = dict(pixel_size_um=0.160, frame_interval=0.00748,
splitsize=10, start_frame=0)
def tearDown(self):
pass
def test_split_tracks(self):
# Test 1: Small set of trajectories with ground truth answer
splitsize = 1
old_indices = np.array([0, 0, 1, 3, 3, 3, 5, 5])
new_indices = TrajectoryGroup.split_tracks(old_indices, splitsize)
assert (new_indices == np.array([0, 0, 1, 2, 2, 3, 4, 4])).all(), new_indices
# Test 2: Large set of trajectories
splitsize = 4
detections = pd.read_csv(self.track_csv).sort_values(by=[TRACK, FRAME]).reset_index(drop=True)
old_indices = np.array(detections[TRACK])
new_indices = TrajectoryGroup.split_tracks(old_indices, splitsize)
assert (new_indices >= 0).all()
# There are no "gaps" in trajectory indices
T = np.unique(new_indices)
T.sort()
assert ((T[1:] - T[:-1]) == 1).all()
# Each new trajectory contains detections from exactly one old trajectory
T = pd.DataFrame({'old': old_indices, TRACK: new_indices, FRAME: detections[FRAME]})
assert (T.groupby(TRACK)["old"].nunique() == 1).all()
# No trajectories have more than *splitsize+1* detections
assert (T.groupby(TRACK).size() <= splitsize+1).all()
# Within each new trajectory, frame indices are monotonically increasing
x = np.asarray(T[[TRACK, FRAME]])
diff = x[1:,:] - x[:-1,:]
assert (diff[diff[:,0]==0, 1] == 1).all()
# Test 3: Empty set of detections
new_indices = TrajectoryGroup.split_tracks(old_indices[:0], splitsize)
assert isinstance(new_indices, np.ndarray)
assert new_indices.shape[0] == 0
def test_preprocess(self):
# Large set of tracks
detections = pd.read_csv(self.track_csv)
# Preprocess
splitsize = 4
start_frame = 100
processed = TrajectoryGroup.preprocess(detections.copy(), splitsize, start_frame)
# Should not contain detections before the start frame
assert (processed[FRAME] >= start_frame).all()
# TRACK_LENGTH should be correct
assert (processed[TRACK_LENGTH] == track_length(processed.copy())[TRACK_LENGTH]).all()
# TRACK should be monotonically increasing
assert processed[TRACK].is_monotonic_increasing
# Should be no gaps in trajectory indices
T = np.array(processed[TRACK].unique())
T.sort()
assert ((T[1:] - T[:-1]) == 1).all()
# FRAME should be monotonically increasing in each trajectory
T = np.asarray(processed[[TRACK, FRAME]])
diff = T[1:,:] - T[:-1,:]
assert (diff[diff[:,0]==0,1] == 1).all()
# Should not contain unassigned detections or singlets
assert (processed[TRACK] >= 0).all()
assert (processed[TRACK_LENGTH] > 1).all()
# The map to the original detections/trajectories should be correct
DETECT_INDEX = TrajectoryGroup.DETECT_INDEX
ORIG_TRACK = TrajectoryGroup.ORIG_TRACK
for col in [PY, PX, FRAME]:
assert (np.abs(
processed[col] - processed[DETECT_INDEX].map(detections[col])
) <= 1.0e-6).all()
assert (processed[ORIG_TRACK] == processed[DETECT_INDEX].map(detections[TRACK])).all()
# Should be idempotent
processed_again = TrajectoryGroup.preprocess(processed.copy(), splitsize, start_frame)
assert len(processed_again) == len(processed)
for col in [PY, PX, TRACK, FRAME]:
assert (np.abs(processed_again[col] - processed[col]) <= 1.0e-6).all()
# Should work on empty dataframes
processed = TrajectoryGroup.preprocess(detections[:0], splitsize, start_frame)
assert processed.empty
assert all(map(lambda c: c in processed.columns, detections.columns))
# Should work when all detections are before the start frame
T = detections.copy()
T[FRAME] -= 1000
processed = TrajectoryGroup.preprocess(T, splitsize, start_frame)
assert processed.empty
assert all(map(lambda c: c in processed.columns, T.columns))
# Should work on all singlets
T = detections.groupby(TRACK, as_index=False).first()
processed = TrajectoryGroup.preprocess(T, splitsize, start_frame)
assert processed.empty
assert all(map(lambda c: c in processed.columns, T.columns))
def test_init(self):
""" Test initialization of the TrajectoryGroup object. """
T = TrajectoryGroup(pd.read_csv(self.track_csv), **self.init_kwargs)
for k, v in self.init_kwargs.items():
assert abs(getattr(T, k) - v) <= 1.0e-6, k
# Set of trajectories is already preprocessed
processed = TrajectoryGroup.preprocess(T.detections)
assert len(processed) == len(T.detections)
for col in [PY, PX, TRACK, FRAME]:
assert (np.abs(processed[col] - T.detections[col]) <= 1.0e-6).all()
# Attributes are correct
assert T.n_detections == len(T.detections)
assert T.n_jumps == len(T.jumps)
assert T.n_tracks == T.detections[TRACK].nunique()
def test_jumps(self):
""" Check for correctness and internal consistency of TrajectoryGroup.jumps """
T = TrajectoryGroup( | pd.read_csv(self.track_csv) | pandas.read_csv |
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere
from conftest import (requires_ephem, requires_numba, needs_numpy_1_10,
pandas_0_22)
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)
ephem_data = solarposition.get_solarposition(
times, tus.latitude, tus.longitude, method='nrel_numpy')
irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)
dni_et = irradiance.extraradiation(times.dayofyear)
ghi = irrad_data['ghi']
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('input, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', requires_ephem('pyephem')])
def test_extraradiation(input, expected, method):
out = irradiance.extraradiation(input)
assert_allclose(out, expected, atol=1)
@requires_numba
def test_extraradiation_nrel_numba():
result = irradiance.extraradiation(times, method='nrel', how='numba', numthreads=8)
assert_allclose(result, [1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_extraradiation_epoch_year():
out = irradiance.extraradiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
def test_extraradiation_invalid():
with pytest.raises(ValueError):
irradiance.extraradiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.grounddiffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series():
ground_irrad = irradiance.grounddiffuse(40, ghi)
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0():
ground_irrad = irradiance.grounddiffuse(40, ghi, albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface():
with pytest.raises(KeyError):
irradiance.grounddiffuse(40, ghi, surface_type='invalid')
def test_grounddiffuse_albedo_surface():
result = irradiance.grounddiffuse(40, ghi, surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series():
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
result = irradiance.klucher(40, 180, 100, 900, 20, 180)
assert_allclose(result, 88.3022221559)
def test_klucher_series():
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 37.446276, 109.209347, 56.965916], atol=1e-4)
def test_haydavies():
result = irradiance.haydavies(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 14.967008, 102.994862, 33.190865], atol=1e-4)
def test_reindl():
result = irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
irrad_data['ghi'], dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [np.nan, 15.730664, 104.131724, 34.166258], atol=1e-4)
def test_king():
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out, df_components = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am, return_components=True)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
expected_components = pd.DataFrame(
np.array([[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['isotropic', 'circumsolar', 'horizon'],
index=times
)
if pandas_0_22():
expected_for_sum = expected.copy()
expected_for_sum.iloc[2] = 0
else:
expected_for_sum = expected
sum_components = df_components.sum(axis=1)
assert_series_equal(out, expected, check_less_precise=2)
assert_frame_equal(df_components, expected_components)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
@needs_numpy_1_10
def test_perez_arrays():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values, am.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
def test_liujordan():
expected = pd.DataFrame(np.
array([[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), | pd.Series([1.1]) | pandas.Series |
#!/usr/bin/python3
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Any, Dict
from dotenv import load_dotenv
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import ComplementNB, MultinomialNB, BernoulliNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.metrics import (
precision_score,
accuracy_score,
recall_score,
f1_score,
confusion_matrix,
make_scorer,
classification_report,
log_loss,
)
from cltk.alphabet.grc import normalize_grc
from lexicogenero.ferramentas.diorisis_reader import (
carrega_textos,
em_pandas,
sent_pandas,
)
from lexicogenero.ferramentas.data import gera_hist_filo, gera_paragrafo, gera_sent
from lexicogenero.grc import STOPS_LIST
import logging
logging.basicConfig(
filename="data/log.log",
level=logging.INFO,
format="%(asctime)s %(message)s",
)
# Load the Diorisis path from whatever is specified in ../.env;
# abort the run if it is not specified.
load_dotenv()
DIORISIS_PATH = os.getenv("DIORISIS_PATH")
PERSEUS_PATH = os.getenv("PERSEUS_PATH")
assert DIORISIS_PATH is not None, "DIORISIS path not specified"
assert PERSEUS_PATH is not None, "PERSEUS path not specified"
if __name__ == "__main__":
plt.rcParams["figure.figsize"] = [20, 5]
plt.style.use("ggplot")
sns.set_palette("Dark2")
print("Gerando banco de dados")
logging.info("Gerando banco de dados")
DATA = "/home/silenus/proj/trabalho-lingcomp/data/data_classico.csv"
SENTS = "/home/silenus/proj/trabalho-lingcomp/data/sents_classico.csv"
if os.path.exists(DATA) and os.path.exists(SENTS):
print(f"Carregando arquivos:\n\t{DATA}\n\t{SENTS}")
logging.info(f"Carregando arquivos:\n\t{DATA}\n\t{SENTS}")
        df_tokens: pd.DataFrame = pd.read_csv(DATA)
        df_sents: pd.DataFrame = | pd.read_csv(SENTS) | pandas.read_csv |
"""
Last edited: January 20 2020
|br| @author: FINE Developer Team (FZJ IEK-3) \n\n
The approaches used are described in
Robinius et. al. (2019) "Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks"
and they are further developed with the help of
Theorem 10 of Labbé et. al. (2019) "Bookings in the European gas market: characterisation of feasibility and
computational complexity results"
and Lemma 3.4 and 3.5 of Schewe et. al. (preprint 2020) "Computing Technical Capacities in the European Entry-Exit
Gas Market is NP-Hard"
"""
import pandas as pd
from FINE import utils
import networkx as nx
import math
import pyomo.environ as py
import warnings
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
import numpy as np
import copy
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
import matplotlib as mpl
import shapely as shp
import time
from multiprocessing import Pool
import sys
from functools import partial
try:
import geopandas as gpd
except ImportError:
warnings.warn('The GeoPandas python package could not be imported.')
# local type und value checker
def isPandasDataFrameNumber(dataframe):
# check if dataframe is a pandas dataframe and if each value is float or int
if not isinstance(dataframe, pd.DataFrame):
raise TypeError("The input argument has to be a pandas DataFrame")
else:
if not dataframe.select_dtypes(exclude=["float", "int"]).empty:
raise ValueError("The input pandas DataFrame has to contain only floats or ints")
def isPandasSeriesPositiveNumber(pandasSeries):
# Check if the input argument is a pandas series and it contains only positive numbers
if not isinstance(pandasSeries, pd.Series):
raise TypeError("The input argument has to be a pandas series")
else:
for index in pandasSeries.index:
utils.isPositiveNumber(pandasSeries[index])
def isNetworkxGraph(graph):
# Check if the input argument is a networkx graph
if not isinstance(graph, nx.Graph):
raise TypeError("The input argument has to be a networkx graph")
def isDictionaryPositiveNumber(dictionary):
# Check if the input argument is a dictionary with positive numbers as values
if not isinstance(dictionary, dict):
raise TypeError("The input argument has to be a dictionary")
else:
for key in dictionary.keys():
utils.isPositiveNumber(dictionary[key])
def checkLowerUpperBoundsOfDicts(lowerDict, upperDict):
# check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds
if not (lowerDict.keys() == upperDict.keys()):
raise ValueError("The input arguments have to have the same keys")
else:
for key in lowerDict.keys():
if lowerDict[key] > upperDict[key]:
raise ValueError("The lower bound has to be the smaller than the upper bound")
def isListOfStrings(strings):
# check if strings is list of strings
if not isinstance(strings, list):
raise TypeError("The input argument has to be a list")
else:
for string in strings:
utils.isString(string)
def isBool(boolean):
# check if boolean is a bool
if not isinstance(boolean, bool):
raise TypeError("The input argument has to be a bool")
# End utils checks
def getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None):
"""
Determines the injection and withdrawal rates into a network from a component in an
EnergySystemModel object or based on the fluid flow data.
:param componentName: name of the network component in the EnergySystemModel class
        (only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:param operationVariablesOptimumData: the injection and withdrawal rates into and out of the
network can either be obtained from a DataFrame with the original fluid flows or an
EnergySystemModel with an optimized Pyomo instance.
In the former case, the argument is a pandas DataFrame with two index columns (specifying
the names of the start and end node of a pipeline) and one index row (for the time steps).
The data in the DataFrame denotes the flow coming from the start node and going to the end
node [e.g. in kWh or Nm^3]. Example:
0 1 ... 8759
node1 node2 0.1 0.0 ... 0.9
node2 node3 0.0 0.3 ... 0.4
node2 node1 0.9 0.9 ... 0.2
node3 node2 1.1 0.2 ... 0.9
|br| * the default value is None
:type operationVariablesOptimumData: pandas DataFrame with non-negative floats
:return: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative)
:rtype: pandas DataFrame
"""
#TODO check type and value correctness
# Get the original optimal operation variables
if operationVariablesOptimumData is not None:
op = operationVariablesOptimumData
else:
op = esM.componentModelingDict[esM.componentNames[componentName]]. \
getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]
# Get a map of the component's network
if esM is None:
mapN = {}
for conn in operationVariablesOptimumData.index:
loc, loc_ = conn
mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})
mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})
else:
mapN = esM.getComponent(componentName)._mapL
# Initialize list for nodal injection and withdrawal time series data
injectionWithdrawalRates, nodeIx = [], []
# Reset connections set (not all indices might be in the operationVariablesOptimumData data)
connections = set()
# For each node loc, compute the injection and withdrawal rates
for loc, locConn in mapN.items():
# As in a few cases zero columns/ rows are dropped from data frames, two lists
# of eligible connection indices are created.
ixIn, ixOut = [], []
for loc_, conn in locConn.items():
if (loc, loc_) in op.index:
ixOut.append((loc, loc_)), connections.add((loc, loc_))
if (loc_, loc) in op.index:
ixIn.append((loc_, loc)), connections.add((loc_, loc))
# If either list has at least one entry, the incoming and outgoing flows are selected
# from the original optimal flow variables and aggregated. The resulting commodity
# withdrawals from the network are positive while injections are negative.
if (len(ixIn) != 0) | (len(ixOut) != 0):
injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())
nodeIx.append(loc)
# Concat data to a pandas dataframe
injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)
return injectionWithdrawalRates
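# Hedged usage sketch (added for illustration; not part of the original module).
# The node names and flow values below are assumptions that only mirror the
# DataFrame layout described in the docstring above: two index columns holding
# the start and end node of each pipeline, one column per time step.
def _exampleGetInjectionWithdrawalRates():
    flows = pd.DataFrame(
        [[0.1, 0.0], [0.0, 0.3], [0.9, 0.9], [1.1, 0.2]],
        index=pd.MultiIndex.from_tuples(
            [('node1', 'node2'), ('node2', 'node3'),
             ('node2', 'node1'), ('node3', 'node2')]),
        columns=[0, 1])
    # Withdrawals from the network come out positive, injections negative.
    return getInjectionWithdrawalRates(operationVariablesOptimumData=flows)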
def getNetworkLengthsFromESM(componentName, esM):
"""
Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.
:param componentName: name of the network component in the EnergySystemModel class
(only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
"""
utils.isString(componentName)
utils.isEnergySystemModelInstance(esM)
distances = esM.getComponent(componentName).distances.copy()
indexMap = esM.getComponent(componentName)._mapC
distances.index = [indexMap[ix] for ix in distances.index]
return distances
def getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength):
"""
If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length,
i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1
:param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the
file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely
LineString. Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end
point of the LineString).
:type shapeFilePath: string
:param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line
:type regColumn1: string
:param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line
:type regColumn2: string
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar].
It holds: dic_node_minPress[index] <= dic_node_maxPress[index].
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
:param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve.
:type minPipeLength: positive number
:param maxPipeLength: determines the maximal length of a pipe in [m].
:type maxPipeLength: positive number
:return: distances_new - pipeline distances in m
:rtype: pandas series
:return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: gdfNodes - GeoDataFrame with the nodes of the network and their names
:rtype: geopandas GeoDataFrame
:return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes
:rtype: geopandas GeoDataFrame
"""
# type and value check
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
utils.isString(regColumn1), utils.isString(regColumn2)
utils.isStrictlyPositiveNumber(maxPipeLength)
utils.isStrictlyPositiveNumber(minPipeLength)
# Read shape file with linestrings connecting the entry/ exit nodes of the gas
gdf=gpd.read_file(shapeFilePath)
if not (gdf.geometry.type == 'LineString').all():
raise ValueError("Geometries of the shape file have to be LineStrings")
print('Number of edges before segmentation:', len(gdf))
originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2])
print('Number of nodes before segmentation:', len(originalNodesSet))
# Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates
coordNames, coords = [], []
pMin, pMax = [], []
lines = []
# Break linestrings into linear pieces
for i, row in gdf.iterrows():
# Simplify linestring (to increase the minimum length of pipeline connections wherever possible)
line = row.geometry.simplify(minPipeLength)
lines.append(line)
row.geometry = line
# Get new nodes
coords_ = [i for i in line.coords]
coords.extend(coords_)
coordNames_ = [row[regColumn1]]
coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j)
for j in range(len(coords_)-2)])
coordNames_.append(row[regColumn2])
coordNames.extend(coordNames_)
# Get averaged lower and upper pressure levels
pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
gdf['geometry'] = lines
# Create DataFrame of old and new nodes and drop duplicates
dfNodes = | pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 13:58:37 2020
@author: qwang
"""
import os
import shutil
import re
import json
import numpy as np
from pathlib import Path
import torch
import torch.nn as nn
#%% Load conll data
def load_conll(filename):
'''
tokens_seqs : list. Each element is a list of tokens for one sequence/doc
tokens_seqs[21] --> ['Leicestershire', '22', 'points', ',', 'Somerset', '4', '.']
tags_seqs : list. Each element is a list of tags for one sequence/doc
tags_seqs[21] --> ['B-ORG', 'O', 'O', 'O', 'B-ORG', 'O', 'O']
'''
filename = Path(filename)
raw_text = filename.read_text().strip()
raw_seqs = re.split(r'\n\t?\n', raw_text)
raw_seqs = [seq for seq in raw_seqs if '-DOCSTART' not in seq]
tokens_seqs, tags_seqs = [], []
for raw_seq in raw_seqs:
seq = raw_seq.split('\n')
tokens, tags = [], []
for line in seq:
splits = line.split(' ')
tokens.append(splits[0])
tags.append(splits[-1].rstrip("\n"))
tokens_seqs.append(tokens)
tags_seqs.append(tags)
return [tokens_seqs, tags_seqs]
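# Hedged usage sketch (added for illustration; not part of the original module).
# "data/sample.conll" is an assumed path: any CoNLL-2003-style file with
# space-separated token/tag columns and blank lines between sequences works.
def _example_load_conll():
    tokens_seqs, tags_seqs = load_conll("data/sample.conll")
    # Pair tokens with tags for the first sequence, e.g.
    # [('Leicestershire', 'B-ORG'), ('22', 'O'), ...]
    return list(zip(tokens_seqs[0], tags_seqs[0]))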
#%% Load pico json file
def load_pico(json_path, group='train'):
'''
tokens_seqs : list. Each element is a list of tokens for one abstract
tokens_seqs[i] --> ['Infected', 'mice', 'that', ...]
tags_seqs : list. Each element is a list of tags for one abstract
tags_seqs[i] --> [''O', 'B-Species', 'O', ...]
'''
dat = [json.loads(line) for line in open(json_path, 'r')]
tokens_seqs, tags_seqs = [], []
for ls in dat:
if ls['group'] == group:
tokens_seqs.append(ls['sent'])
tags_seqs.append(ls['sent_tags'])
return tokens_seqs, tags_seqs
#%%
# def tokenize_encode(seqs, tags, tag2idx, tokenizer):
# inputs = tokenizer(seqs, is_split_into_words=True, return_offsets_mapping=True, padding=False, truncation=True)
# tags_enc = [[tag2idx[tag] for tag in record] for record in tags] # convert tags to idxs
# tags_upt = []
# for doc_tags, doc_offset in zip(tags_enc, inputs.offset_mapping):
# arr_offset = np.array(doc_offset)
# doc_tags_enc = np.ones(len(doc_offset), dtype=int) * -100 # create an empty array of -100
# # set tags whose first offset position is 0 and the second is not 0
# doc_tags_enc[(arr_offset[:, 0] == 0) & (arr_offset[:, 1] != 0)] = doc_tags
# tags_upt.append(doc_tags_enc.tolist())
# inputs.pop("offset_mapping")
# inputs.update({'tags': tags_upt})
# return inputs
# https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb
def tokenize_encode(seqs, tags, tag2idx, tokenizer, tag_all_tokens=True):
inputs = tokenizer(seqs, is_split_into_words=True, return_offsets_mapping=True, padding=False, truncation=True)
all_tags_enc_old = [[tag2idx[tag] for tag in record] for record in tags] # convert tags to idxs
all_tags_enc_new = []
all_word_ids = []
for i, tags_old in enumerate(all_tags_enc_old): # per sample
word_ids = inputs.word_ids(batch_index=i)
tags_new = []
pre_word_id = None
for wid in word_ids:
if wid is None: # set tag to -100 for special token like [SEP] or [CLS]
tags_new.append(-100)
elif wid != pre_word_id: # Set label for the first token of each word
tags_new.append(tags_old[wid])
else:
# For other tokens in a word, set the tag to either the current tag or -100, depending on the tag_all_tokens flag
tags_new.append(tags_old[wid] if tag_all_tokens else -100)
pre_word_id = wid
all_tags_enc_new.append(tags_new)
word_ids[0] = -100 # [CLS]
word_ids[-1] = -100 # [SEP]
all_word_ids.append(word_ids)
# inputs.pop("offset_mapping")
inputs.update({'tags': all_tags_enc_new, 'word_ids': all_word_ids})
return inputs
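# Hedged usage sketch (added for illustration; not part of the original module).
# The checkpoint name and the toy tag set are assumptions; the point is to show
# the alignment rule above: the first sub-token of every word gets the word's
# tag, later sub-tokens get the same tag (tag_all_tokens=True) or -100, and the
# special [CLS]/[SEP] positions are always -100.
def _example_tokenize_encode_alignment():
    from transformers import AutoTokenizer  # assumed project dependency
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    seqs = [["Infected", "mice", "recovered"]]
    tags = [["O", "B-Species", "O"]]
    tag2idx = {"O": 0, "B-Species": 1}
    inputs = tokenize_encode(seqs, tags, tag2idx, tokenizer, tag_all_tokens=True)
    return inputs["tags"][0]  # e.g. [-100, 0, 1, 0, -100] if no word is split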
#%%
class EncodingDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __getitem__(self, idx):
return {key: torch.tensor(value[idx]) for key, value in self.encodings.items()}
def __len__(self):
return len(self.encodings.input_ids)
#%%
class PadDoc():
def __call__(self, batch):
# Element in batch: {'input_ids': Tensor, 'attention_mask': Tensor, 'tags': Tensor}
# Sort batch by seq_len in descending order
# x['input_ids']: [seq_len]
sorted_batch = sorted(batch, key=lambda x: len(x['input_ids']), reverse=True)
# Pad within batch
input_ids = [x['input_ids'] for x in sorted_batch]
input_ids_padded = nn.utils.rnn.pad_sequence(input_ids, batch_first=True)
attn_masks = [x['attention_mask'] for x in sorted_batch]
attn_masks_padded = nn.utils.rnn.pad_sequence(attn_masks, batch_first=True)
tags = [x['tags'] for x in sorted_batch]
tags_padded = nn.utils.rnn.pad_sequence(tags, batch_first=True)
# Store length of each doc for unpad them later
        true_lens = torch.LongTensor([len(x)-2 for x in tags])  # [CLS] and [SEP] shouldn't be counted in the length
word_ids = [x['word_ids'] for x in sorted_batch]
word_ids_padded = nn.utils.rnn.pad_sequence(word_ids, batch_first=True)
return input_ids_padded, attn_masks_padded, tags_padded, true_lens, word_ids_padded
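# Hedged usage sketch (added for illustration; not part of the original module).
# Shows how PadDoc is meant to be used as a DataLoader collate_fn, assuming
# `encodings` was produced by tokenize_encode above.
def _example_padded_loader(encodings, batch_size=16):
    dataset = EncodingDataset(encodings)
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, collate_fn=PadDoc())
    # Each batch is (input_ids, attention_mask, tags, true_lens, word_ids),
    # padded to the longest sequence in that batch.
    return loader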
#%%
def epoch_idx2tag(epoch_sample_idxs, idx2tag):
'''
epoch_sample_idxs: list of tag lists with its true seq_len
len(epoch_sample_idxs) = n_samples
len(epoch_sample_idxs[0]) = true seq_len (varies among samples)
Convert epoch_idxs to epoch_tags
'''
epoch_tags = []
# idxs is list of index for a single text
for idxs in epoch_sample_idxs:
tags = [idx2tag[i] for i in idxs]
epoch_tags.append(tags)
return epoch_tags
#%%
from seqeval.metrics import f1_score, recall_score, precision_score, accuracy_score
def scores(epoch_trues, epoch_preds):
f1 = f1_score(epoch_trues, epoch_preds)
rec = recall_score(epoch_trues, epoch_preds)
prec = precision_score(epoch_trues, epoch_preds)
acc = accuracy_score(epoch_trues, epoch_preds)
return {"f1": np.around(f1, 4),
"rec": np.around(rec, 4),
"prec": np.around(prec, 4),
"acc": np.around(acc, 4)}
# return {"f1": f1, # np.around(f1, 4),
# "rec": rec, #np.around(rec, 4),
# "prec": prec, #np.around(prec, 4),
# "acc": acc} #np.around(acc, 4)}
#%%
def save_checkpoint(state, is_best, checkdir):
"""
Save model and training parameters at checkpoint + 'last.pth.tar'.
If is_best==True, also saves checkpoint + 'best.pth.tar'
Params:
state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
is_best: (bool) True if it is the best model seen till now
checkdir: (string) folder where parameters are to be saved
"""
filepath = os.path.join(checkdir, 'last.pth.tar')
    if not os.path.exists(checkdir):
os.mkdir(checkdir)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkdir, 'best.pth.tar'))
def load_checkpoint(checkfile, model, optimizer=None):
"""
Load model parameters (state_dict) from checkfile.
If optimizer is provided, loads state_dict of optimizer assuming it is present in checkpoint.
Params:
checkfile: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
    if not os.path.exists(checkfile):
        raise FileNotFoundError("File doesn't exist {}".format(checkfile))
checkfile = torch.load(checkfile)
model.load_state_dict(checkfile['state_dict'])
if optimizer:
optimizer.load_state_dict(checkfile['optim_dict'])
return checkfile
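# Hedged usage sketch (added for illustration; not part of the original module).
# Assumes `model` and `optimizer` already exist; 'exps/run1' is an assumed
# checkpoint directory, not a path used elsewhere in this file.
def _example_checkpoint_roundtrip(model, optimizer, epoch, is_best):
    state = {'epoch': epoch,
             'state_dict': model.state_dict(),
             'optim_dict': optimizer.state_dict()}
    save_checkpoint(state, is_best=is_best, checkdir='exps/run1')
    # Later: restore the weights (and optimizer state) from the best checkpoint.
    return load_checkpoint(os.path.join('exps/run1', 'best.pth.tar'), model, optimizer)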
def save_dict_to_json(d, json_path):
"""
Save dict of floats to json file
d: dict of float-castable values (np.float, int, float, etc.)
"""
with open(json_path, 'w') as fout:
d = {key: float(value) for key, value in d.items()}
json.dump(d, fout, indent=4)
#%%
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
def plot_prfs(prfs_json_path):
with open(prfs_json_path) as f:
dat = json.load(f)
# Create scores dataframe
epochs = int(len(dat['prfs'])/2)
cols = ['f1', 'rec', 'prec', 'acc', 'loss']
train_df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
"""This module contains all functions that are either called from Excel
or manipulate Excel.
"""
import datetime as dt
from dateutil import tz
import requests
import pandas as pd
import matplotlib.pyplot as plt
import xlwings as xw
import database
# This is the part of the URL that is the same for every request
BASE_URL = "https://pypi.org/pypi"
def add_package():
""" Adds a new package including the version history to the database.
Triggers an update of the dropdown on the Tracker tab.
"""
# Excel objects
db_sheet = xw.Book.caller().sheets["Database"]
package_name = db_sheet["new_package"].value
feedback_cell = db_sheet["new_package"].offset(column_offset=1)
# Clear feedback cell
feedback_cell.clear_contents()
# Check if the package exists on PyPI
if not package_name:
feedback_cell.value = "Error: Please provide a name!"
return
if requests.get(f"{BASE_URL}/{package_name}/json",
timeout=6).status_code != 200:
feedback_cell.value = "Error: Package not found!"
return
# Insert the package name into the packages table
error = database.store_package(package_name)
db_sheet["new_package"].clear_contents()
# Show any errors, otherwise kick off a database update and
# refresh the dropdown so you can select the new package
if error:
feedback_cell.value = f"Error: {error}"
else:
feedback_cell.value = f"Added {package_name} successfully."
update_database()
refresh_dropdown()
def update_database():
""" Deletes all records from the versions table, fetches all
data again from PyPI and stores the versions again in the table.
"""
# Excel objects
sheet_db = xw.Book.caller().sheets["Database"]
# Clear logs
sheet_db["log"].expand().clear_contents()
# Keeping things super simple: Delete all versions for all packages
# and repopulate the package_versions table from scratch
database.delete_versions()
df_packages = database.get_packages()
logs = []
# Query the PyPI REST API
for package_id, row in df_packages.iterrows():
ret = requests.get(f"{BASE_URL}/{row['package_name']}/json",
timeout=6)
if ret.status_code == 200:
ret = ret.json() # parse the JSON string into a dictionary
logs.append(f"INFO: {row['package_name']} downloaded successfully")
else:
logs.append(f"ERROR: Could not download data for {row['package_name']}")
continue
# Instantiate a DataFrame by extracting data from the REST API response
releases = []
for version, files in ret["releases"].items():
if ret["releases"][version]: # ignore releases without info
releases.append((files[0]["upload_time"], version, package_id))
df_releases = pd.DataFrame(columns=["uploaded_at", "version_string", "package_id"],
data=releases)
df_releases["uploaded_at"] = | pd.to_datetime(df_releases["uploaded_at"]) | pandas.to_datetime |
import os
import pandas as pd
from ctg_benchmark.evaluation.metrics import aggregate_class
from ctg_benchmark.utils.io import load_yaml
def expand_class_metric(score, name='metric'):
return {f'{name} - {i}': s for i, s in enumerate(score)}
def results_to_dataframe(list_results, index_to_ignore=None, **kwargs):
all_records = []
for exp_res in list_results:
accuracy_class, num_nan = aggregate_class(exp_res['results']['accuracy_class'], index=index_to_ignore)
accuracy_f1, _ = aggregate_class(exp_res['results']['f1_class'], index=index_to_ignore)
records = {'Accuracy': exp_res['results']['accuracy_micro'],
'F1': exp_res['results']['f1_micro'],
'Dice': exp_res['results']['dice'],
'Accuracy Class': accuracy_class,
'F1 Class': accuracy_f1,
'num Nan': num_nan,
'file_path': exp_res['file_path'][0],
'stack': exp_res['meta']['stack'][0],
'stage': exp_res['meta']['stage'][0],
'multiplicity': exp_res['meta']['multiplicity'][0],
'unique_idx': exp_res['meta']['unique_idx'][0]}
records.update(expand_class_metric(exp_res['results']['accuracy_class'], name='Accuracy Class'))
records.update(expand_class_metric(exp_res['results']['f1_class'], name='F1 Class'))
records.update(kwargs)
all_records.append(records)
return | pd.DataFrame.from_records(all_records) | pandas.DataFrame.from_records |
import ast
import os
import re
import uuid
import pandas as pd
import configuration as cf
from guesslang import Guess
from pydriller import Repository
from utils import log_commit_urls
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
fixes_columns = [
'cve_id',
'hash',
'repo_url',
]
commit_columns = [
'hash',
'repo_url',
'author',
'author_date',
'author_timezone',
'committer',
'committer_date',
'committer_timezone',
'msg',
'merge',
'parents',
'num_lines_added',
'num_lines_deleted',
'dmm_unit_complexity',
'dmm_unit_interfacing',
'dmm_unit_size'
]
file_columns = [
'file_change_id',
'hash',
'filename',
'old_path',
'new_path',
'change_type',
'diff',
'diff_parsed',
'num_lines_added',
'num_lines_deleted',
'code_after',
'code_before',
'nloc',
'complexity',
'token_count',
'programming_language'
]
method_columns = [
'method_change_id',
'file_change_id',
'name',
'signature',
'parameters',
'start_line',
'end_line',
'code',
'nloc',
'complexity',
'token_count',
'top_nesting_level',
'before_change',
]
def extract_project_links(df_master):
"""
extracts all the reference urls from CVE records that match to the repo commit urls
"""
df_fixes = pd.DataFrame(columns=fixes_columns)
git_url = r'(((?P<repo>(https|http):\/\/(bitbucket|github|gitlab)\.(org|com)\/(?P<owner>[^\/]+)\/(?P<project>[^\/]*))\/(commit|commits)\/(?P<hash>\w+)#?)+)'
cf.logger.info('-' * 70)
cf.logger.info('Extracting all the reference urls from CVE...')
for i in range(len(df_master)):
ref_list = ast.literal_eval(df_master['reference_json'].iloc[i])
if len(ref_list) > 0:
for ref in ref_list:
url = dict(ref)['url']
link = re.search(git_url, url)
if link:
row = {
'cve_id': df_master['cve_id'][i],
'hash': link.group('hash'),
'repo_url': link.group('repo').replace(r'http:', r'https:')
}
df_fixes = df_fixes.append(pd.Series(row), ignore_index=True)
df_fixes = df_fixes.drop_duplicates().reset_index(drop=True)
    cf.logger.info(f'Number of collected references to vulnerability fixing commits: {len(df_fixes)}')
return df_fixes
def guess_pl(code):
"""
:returns guessed programming language of the code
"""
if code:
return Guess().language_name(code.strip())
else:
return 'unknown'
def clean_string(signature):
return signature.strip().replace(' ','')
def get_method_code(source_code, start_line, end_line):
try:
if source_code is not None:
code = ('\n'.join(source_code.split('\n')[int(start_line) - 1: int(end_line)]))
return code
else:
return None
except Exception as e:
        cf.logger.warning(f'Problem while getting method code from the file! {e}')
pass
def changed_methods_both(file):
"""
    Return the methods that were changed.
    :return: two sets: methods changed in the new version of the file, and methods changed in the old version
"""
new_methods = file.methods
old_methods = file.methods_before
added = file.diff_parsed["added"]
deleted = file.diff_parsed["deleted"]
methods_changed_new = {
y
for x in added
for y in new_methods
if y.start_line <= x[0] <= y.end_line
}
methods_changed_old = {
y
for x in deleted
for y in old_methods
if y.start_line <= x[0] <= y.end_line
}
return methods_changed_new, methods_changed_old
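# Illustrative note (added): a method spanning lines 10-20 in the new file is reported as
# changed as soon as any added line number from diff_parsed falls inside that 10-20 range;
# deleted line numbers are matched against the old file's methods in the same way.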
# --------------------------------------------------------------------------------------------------------
# extracting method_change data
def get_methods(file, file_change_id):
"""
returns the list of methods in the file.
"""
file_methods = []
try:
if file.changed_methods:
cf.logger.debug('-' * 70)
cf.logger.debug('\nmethods_after: ')
cf.logger.debug('- ' * 35)
for m in file.methods:
if m.name != '(anonymous)':
cf.logger.debug(m.long_name)
cf.logger.debug('\nmethods_before: ')
cf.logger.debug('- ' * 35)
for mb in file.methods_before:
if mb.name != '(anonymous)':
cf.logger.debug(mb.long_name)
cf.logger.debug('\nchanged_methods: ')
cf.logger.debug('- ' * 35)
for mc in file.changed_methods:
if mc.name != '(anonymous)':
cf.logger.debug(mc.long_name)
cf.logger.debug('-' * 70)
# for mb in file.methods_before:
# for mc in file.changed_methods:
# #if mc.name == mb.name and mc.name != '(anonymous)':
# if clean_string(mc.long_name) == clean_string(mb.long_name) and mc.name != '(anonymous)':
if file.changed_methods:
methods_after, methods_before = changed_methods_both(file) # modified methods in source_code_after/_before
if methods_before:
for mb in methods_before:
# filtering out code not existing, and (anonymous)
# because lizard API classifies the code part not as a correct function.
# Since, we did some manual test, (anonymous) function are not function code.
# They are also not listed in the changed functions.
if file.source_code_before is not None and mb.name != '(anonymous)':
# method_before_code = ('\n'.join(file.source_code_before.split('\n')[int(mb.start_line) - 1: int(mb.end_line)]))
method_before_code = get_method_code(file.source_code_before, mb.start_line, mb.end_line)
method_before_row = {
'method_change_id': uuid.uuid4().fields[-1],
'file_change_id': file_change_id,
'name': mb.name,
'signature': mb.long_name,
'parameters': mb.parameters,
'start_line': mb.start_line,
'end_line': mb.end_line,
'code': method_before_code,
'nloc': mb.nloc,
'complexity': mb.complexity,
'token_count': mb.token_count,
'top_nesting_level': mb.top_nesting_level,
'before_change': 'True',
}
file_methods.append(method_before_row)
if methods_after:
for mc in methods_after:
if file.source_code is not None and mc.name != '(anonymous)':
# changed_method_code = ('\n'.join(file.source_code.split('\n')[int(mc.start_line) - 1: int(mc.end_line)]))
changed_method_code = get_method_code(file.source_code, mc.start_line, mc.end_line)
changed_method_row = {
'method_change_id': uuid.uuid4().fields[-1],
'file_change_id': file_change_id,
'name': mc.name,
'signature': mc.long_name,
'parameters': mc.parameters,
'start_line': mc.start_line,
'end_line': mc.end_line,
'code': changed_method_code,
'nloc': mc.nloc,
'complexity': mc.complexity,
'token_count': mc.token_count,
'top_nesting_level': mc.top_nesting_level,
'before_change': 'False',
}
file_methods.append(changed_method_row)
if file_methods:
return file_methods
else:
return None
except Exception as e:
        cf.logger.warning(f'Problem while fetching the methods! {e}')
pass
# ---------------------------------------------------------------------------------------------------------
# extracting file_change data of each commit
def get_files(commit):
"""
returns the list of files of the commit.
"""
commit_files = []
commit_methods = []
try:
cf.logger.info(f'Extracting files for {commit.hash}')
if commit.modified_files:
for file in commit.modified_files:
cf.logger.debug(f'Processing file {file.filename} in {commit.hash}')
# programming_language = (file.filename.rsplit(".')[-1] if '.' in file.filename else None)
programming_language = guess_pl(file.source_code) # guessing the programming language of fixed code
file_change_id = uuid.uuid4().fields[-1]
file_row = {
'file_change_id': file_change_id, # filename: primary key
'hash': commit.hash, # hash: foreign key
'filename': file.filename,
'old_path': file.old_path,
'new_path': file.new_path,
'change_type': file.change_type, # i.e. added, deleted, modified or renamed
'diff': file.diff, # diff of the file as git presents it (e.g. @@xx.. @@)
'diff_parsed': file.diff_parsed, # diff parsed in a dict containing added and deleted lines lines
'num_lines_added': file.added_lines, # number of lines added
'num_lines_deleted': file.deleted_lines, # number of lines removed
'code_after': file.source_code,
'code_before': file.source_code_before,
'nloc': file.nloc,
'complexity': file.complexity,
'token_count': file.token_count,
'programming_language': programming_language,
}
file_methods = []
commit_files.append(file_row)
file_methods = get_methods(file, file_change_id)
if file_methods is not None:
commit_methods.extend(file_methods)
else:
cf.logger.info('The list of modified_files is empty')
return commit_files, commit_methods
except Exception as e:
        cf.logger.warning(f'Problem while fetching the files! {e}')
pass
def extract_commits(repo_url, hashes):
"""This function extract git commit information of only the hashes list that were specified in the
commit URL. All the commit_fields of the corresponding commit have been obtained.
Every git commit hash can be associated with one or more modified/manipulated files.
One vulnerability with same hash can be fixed in multiple files so we have created a dataset of modified files
as 'df_file' of a project.
:param repo_url: list of url links of all the projects.
:param hashes: list of hashes of the commits to collect
:return dataframes: at commit level and file level.
"""
repo_commits = []
repo_files = []
repo_methods = []
# ----------------------------------------------------------------------------------------------------------------
# extracting commit-level data
if 'github' in repo_url:
repo_url = repo_url + '.git'
cf.logger.debug(f'Extracting commits for {repo_url} with {cf.NUM_WORKERS} worker(s) looking for the following hashes:')
log_commit_urls(repo_url, hashes)
# giving first priority to 'single' parameter for single hash because
# it has been tested that 'single' gets commit information in some cases where 'only_commits' does not,
# for example: https://github.com/hedgedoc/hedgedoc.git/35b0d39a12aa35f27fba8c1f50b1886706e7efef
single_hash = None
if len(hashes) == 1:
single_hash = hashes[0]
hashes = None
for commit in Repository(path_to_repo=repo_url,
only_commits=hashes,
single=single_hash,
num_workers=cf.NUM_WORKERS).traverse_commits():
cf.logger.debug(f'Processing {commit.hash}')
try:
commit_row = {
'hash': commit.hash,
'repo_url': repo_url,
'author': commit.author.name,
'author_date': commit.author_date,
'author_timezone': commit.author_timezone,
'committer': commit.committer.name,
'committer_date': commit.committer_date,
'committer_timezone': commit.committer_timezone,
'msg': commit.msg,
'merge': commit.merge,
'parents': commit.parents,
'num_lines_added': commit.insertions,
'num_lines_deleted': commit.deletions,
'dmm_unit_complexity': commit.dmm_unit_complexity,
'dmm_unit_interfacing': commit.dmm_unit_interfacing,
'dmm_unit_size': commit.dmm_unit_size,
}
commit_files, commit_methods = get_files(commit)
repo_commits.append(commit_row)
repo_files.extend(commit_files)
repo_methods.extend(commit_methods)
except Exception as e:
            cf.logger.warning(f'Problem while fetching the commits! {e}')
pass
if repo_commits:
        df_repo_commits = pd.DataFrame.from_dict(repo_commits)
import pandas as pd
REGEX_SEARCHES = {
'class_matches': '^([OABFGKM])',
'type_matches': '^.*([VI])+',
'number_matches': '^[OABFGKM]([0-9])'
}
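# Illustrative note (added): for a spectrum string such as 'G2V' the patterns above are meant to
# capture the class ('G'), the number ('2') and the luminosity type ('V').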
USED_SEARCHES = ['class', 'type']
def run():
raw_df = load_csv_data('rawStars.csv')
raw_df = determine_matches(raw_df)
df = apply_regex(raw_df)
df.to_csv('stars.csv')
def load_csv_data(filepath):
df = pd.read_csv(filepath)
df.columns = map(str.lower, df.columns)
return df
def determine_matches(df):
    df.loc[pd.isnull(df['spectrum'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
def gap_size(ts):
    """Find the size of gaps (blocks of nans) for every point in an array.
    Parameters
    ----------
    ts : array_like
        an array that possibly has nans
    Returns
    -------
    gaps : array_like
        An array whose values give the size of the gap (number of nans) for each point in ts; zero for non-nan points.
    """
    from itertools import groupby
    isgap = np.zeros_like(ts)
    isgap[np.isnan(ts)] = 1
    gaps = []
    for a, b in groupby(isgap, lambda value: value == 0):
        if a:  # not a gap: keep the zeros
            gaps.extend(list(b))
        else:  # a gap: replace each 1 with the length of the run of nans
            l = len(list(b))
            gaps.extend([l] * l)
    return np.array(gaps)
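# Hedged usage sketch (added; not part of the original module):
#   >>> gap_size(np.array([1.0, np.nan, np.nan, 4.0, np.nan]))
#   array([0., 2., 2., 0., 1.])
# i.e. every position inside a nan-run is labelled with that run's length.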
def main():
tndx = pd.date_range(start="2019-01-01", end="2019-01-10", freq="H")
tnum = np.arange(0., len(tndx))
signal = np.cos(tnum*2.*np.pi/24.)
signal[80:85] = np.nan
signal[160:168:2] = np.nan
df = pd.DataFrame({"signal": signal}, index=tndx)
orig = df.index[0]
x0 = (df.index - orig).total_seconds()
y0 = df.values
# Express the destination times as a dataframe and append to the source
tndx2 = pd.DatetimeIndex(
['2019-01-04 00:00', '2019-01-04 10:17', '2019-01-07 16:00'])
x1 = (tndx2 - orig).total_seconds()
# Extract at destination locations
good = ~np.isnan(y0).flatten()
# print x0[good]
# print y0[good]
interpolator = scipy.interpolate.interp1d(
x0[good], y0[good], kind='cubic', axis=0, fill_value=np.nan, assume_sorted=False)
interpolated = interpolator(x1)
# print interpolated
def main1():
tndx = pd.date_range(start="2019-01-01", end="2019-01-10", freq="H")
tnum = np.arange(0., len(tndx))
signal = np.cos(tnum*2.*np.pi/24.)
signal[80:85] = np.nan
signal[160:168:2] = np.nan
df = pd.DataFrame({"signal": signal}, index=tndx)
# Express the destination times as a dataframe and append to the source
tndx2 = pd.DatetimeIndex(
['2019-01-04 00:00', '2019-01-04 10:17', '2019-01-07 16:00'])
    df2 = pd.DataFrame({"signal": [np.nan, np.nan, np.nan]}, index=tndx2)
#! /usr/bin/env python
# by weil
# Sep 17, 2020
import pandas as pd
import numpy as np
import Cell_BLAST as cb
import scipy
import os
import scanpy as sc
from anndata import AnnData
from utils import construct_dataset
# expr matrix
expr_mat=pd.read_csv("../download/MacParland/GSE115469_Data.csv.gz", index_col=0)
# reshape to cell * gene
expr_mat=expr_mat.T
# cell meta
meta_df=pd.read_csv("../download/MacParland/Cell_clusterID_cycle.txt", sep="\t", index_col=0)
meta_df.columns=["donor", "barcode", "cluster", "cell_cycle"]
meta_df=meta_df.drop(columns="barcode")
meta_df["donor"]=meta_df["donor"].str.slice(0, 2)
# add donor meta
donor_meta=pd.read_csv("../download/MacParland/donor_annotation.csv")
meta_df["cell_id"]=meta_df.index
meta_df1=meta_df.merge(donor_meta, on="donor")
# add cluster annotation
cluster_annotation = pd.read_csv("../download/MacParland/cluster_annotation.csv")
"""`Nate` importers involving pandas.
This module provides common importers for the `Nate` class. They use existing
pandas import functionality as an interface to `Nate`. These importers are the
recommended way to import data into `Nate`, unless the user needs to import data
in ways not covered by this module's functionality.
"""
import pandas
from typing import List, Union
from .named_tuple_generator import tupleize
from .nate_class import Nate
from .timestamp_process import convert_times
def process_dataframe(temp_data,
text: str,
unique_id: str = None,
time: str = None,
twitter_times: bool = False,
columns_to_keep: List = []):
"""Builds a nate object from a dataframe."""
series_dict = {}
special_column_list = [(text, "text"), (unique_id, "unique_id"),
(time, "times")]
for special_column, special_column_name in special_column_list:
if special_column != None:
temp_column = temp_data[special_column]
temp_column.name = special_column_name
series_dict[special_column_name] = temp_column.tolist()
for covariate_column in columns_to_keep:
temp_column = temp_data[covariate_column]
temp_column.name = covariate_column
series_dict[covariate_column] = temp_column.tolist()
if time != None:
try:
series_dict['time'] = convert_times(series_dict['times'])
del series_dict['times']
except:
series_dict['time'] = series_dict['times']
del series_dict['times']
return Nate(tupleize(series_dict))
def import_dataframe(input_dataframe: pandas.DataFrame,
text: str,
unique_id: str = None,
time: str = None,
twitter_times: bool = False,
columns_to_keep: List = []):
"""Imports a pandas dataframe into nate.
Args:
input_dataframe (pandas.DataFrame): The dataframe to be loaded.
text (str): The name of the column containing the text data to be
analyzed with nate. Required for all uses of nate.
unique_id (str, optional): The name of the column containing unique
identifiers (e.g. a unique name or hash ID#). Required
for some uses of nate (e.g. Divsim).
time (str, optional): The name of the column containing the time the
observation was recorded. Required for some uses of
nate (e.g. edge_burst).
columns_to_keep (list, optional): A list of column names indicating
which columns not specified elsewhere (e.g. for the
time parameter) are kept.
Returns:
Nate: an instance of the `Nate` class containing all data from the
columns specified in the parameters.
The columns indicated in the text, unique_id, and time parameters will
be renamed to 'text', 'unique_id', and 'time', accordingly. The names
of the columns listed in 'columns_to_keep' will be preserved as-is.
"""
if time!= None and twitter_times == False:
input_dataframe = input_dataframe.astype({time: 'str'})
input_dataframe[time] = pandas.to_datetime(input_dataframe[time], infer_datetime_format=True)
return process_dataframe(input_dataframe, text, unique_id, time, twitter_times,
columns_to_keep)
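# Hedged usage sketch (added; the frame below is invented for illustration):
#   >>> df = pandas.DataFrame({"body": ["some text", "more text"],
#   ...                        "author": ["a1", "a2"],
#   ...                        "created": ["2020-01-01", "2020-01-02"]})
#   >>> nate_obj = import_dataframe(df, text="body", unique_id="author", time="created")
# The 'body', 'author' and 'created' columns are carried into nate as 'text', 'unique_id' and 'time'.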
def import_csv(file_paths: Union[List, str],
text: str,
unique_id: str = None,
time: str = None,
twitter_times: bool = False,
columns_to_keep: List = [],
observation_threshold=0):
"""Imports a comma-separated values file (.csv) into `nate`.
This function uses pre-existing pandas functionality to read in a
comma-separated value file (.csv) into `nate`.
Args:
file_path (str or path-like): The location of the file to
be loaded from disk.
text (str): The name of the column containing the text
data to be analyzed with nate. Required for all uses of nate.
unique_id (str, optional): The name of the column containing unique
identifiers (e.g. a unique name or hash ID#). Required for
some uses of nate (e.g. Divsim).
time (str, optional): The name of the column containing the time the
observation was recorded. Required for some uses of nate
(e.g. edgeburst).
columns_to_keep (list, optional): A list of column names indicating
which columns not specified elsewhere (e.g. for the time
parameter) are kept.
observation_threshold (int, optional): An integer indicating how many
observations to include in the imported data, at minimum.
Once the number of rows in the imported dataset exceeds this value,
the importer will not import the next file in the list of
file paths passed to `file_path`. Has no effect if a string
or path-like object is passed to `file_paths`.
Returns:
Nate: an instance of the `Nate` class containing all data from the
columns specified in the parameters.
The columns indicated in the text, unique_id, and time parameters will
be renamed to 'text', 'unique_id', and 'time', accordingly. The names of the
columns listed in 'columns_to_keep' will be preserved as-is.
Note that this function is only equipped to handle pre-processed .csv
files that are ready to be loaded into a pandas dataframe with no
additional manipulation. If the data requires any kind of special
treatment, prudent users will first load their data using pandas
directly into python, and then use the 'import_dataframe' function
to load their data into nate.
"""
columns_to_import = [*columns_to_keep]
for special_column in [text, unique_id, time]:
if special_column != None:
columns_to_import.append(special_column)
dtypes = {}
if time!= None:
dtypes[time] = "str"
if isinstance(file_paths, list):
df_list = []
total_len = 0
for entry in file_paths:
temp_df = pandas.read_csv(entry, usecols=columns_to_import, dtype = dtypes)
df_list.append(temp_df)
if observation_threshold != 0:
total_len += len(temp_df)
if total_len >= observation_threshold:
break
        temp_data = pandas.concat(df_list)
import fiona
import geopandas as gpd
import json
import matplotlib
import numpy as np
import os
import pandas as pd
import rasterio
import rasterio.mask
matplotlib.use("tkagg")
import matplotlib.pyplot as plt
import seaborn
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore", "GeoSeries.notna", UserWarning)
from sklearn.metrics import confusion_matrix
from shapely.geometry import box, Polygon
from pyproj import CRS
from rasterio import merge
from copy import deepcopy
from glob import glob
from subprocess import check_output
from collections import defaultdict
import runspec as rs
ACRES_PER_SQUARE_METER = 0.000247105
MONTANA_SHAPEFILE = "/home/thomas/irrigated-training-data-aug21/aux-shapefiles/mt.shp"
def merge_rasters_gdal(raster_files, out_filename):
if not len(raster_files):
return
if not os.path.isfile(out_filename):
print("processing", out_filename)
cmd = check_output(
["gdal_merge.py", "-of", "GTiff", "-ot", "Byte", "-o", out_filename]
+ raster_files
)
print(cmd)
def flu_data_irr_area_by_county(county_shp, flu, out_filename, plot=False, save=False):
if os.path.isfile(out_filename):
return
flu = gpd.read_file(flu)
flu = flu.loc[flu["LType"] == "I"]
counties = gpd.read_file(county_shp)
flu = flu.to_crs("EPSG:5070")
counties = counties.to_crs("EPSG:5070")
counties_with_irr_attr = counties.copy()
irrigated_area = {}
for row in counties.iterrows():
polygon = Polygon(row[1]["geometry"])
county_name = row[1]["NAME"]
# if county_name not in ('TETON', 'CASCADE'):
# continue
# print(county_name)
try:
flu_county = gpd.clip(flu, polygon)
except Exception as e:
print("error", county_name, e)
irrigated_area[county_name] = -1
continue
if plot:
fig, ax = plt.subplots()
flu_county.plot(ax=ax)
poly_gdf = gpd.geopandas.GeoDataFrame(
[1], geometry=[polygon], crs=counties.crs
)
poly_gdf.boundary.plot(ax=ax, color="red")
plt.title(county_name)
plt.show()
else:
irr_acres = np.sum(flu_county["geometry"].area)
irrigated_area[county_name] = irr_acres
names = list(irrigated_area.keys())
areas = list(irrigated_area.values())
for name, area in zip(names, areas):
print(name, area * ACRES_PER_SQUARE_METER)
counties_with_irr_attr["IRR_AREA"] = areas
if save:
counties_with_irr_attr.to_file(out_filename)
def merge_split_rasters_copy_band_descriptions(rasters, out_filename):
if not os.path.isfile(out_filename):
dsets = []
for raster in rasters:
dsets.append(rasterio.open(raster, "r"))
merged, out_transform = merge.merge(dsets)
with rasterio.open(rasters[0], "r") as src:
meta = src.meta.copy()
descriptions = src.descriptions
meta.update(
{
"height": merged.shape[1],
"width": merged.shape[2],
"transform": out_transform,
}
)
with rasterio.open(out_filename, "w", **meta) as dst:
dst.descriptions = descriptions
dst.write(merged)
for raster in rasters:
print("removing", raster)
os.remove(raster)
def _assign_year(raster, years):
for year in years:
if year in raster:
return year
raise ValueError("raster {} didn't have a year attr.")
def rename_rasters(rasters, county_shapefile, out_directory):
osb = os.path.basename
years = [str(r) for r in range(2000, 2020)]
gdf = gpd.read_file(county_shapefile)
gdf = gdf.to_crs("EPSG:5070")
for raster in rasters:
year = _assign_year(osb(raster), years)
with rasterio.open(raster, "r") as src:
bounds = src.bounds
geom = box(*bounds)
n = 0
for row in gdf.iterrows():
poly = Polygon(row[1]["geometry"])
if geom.contains(poly):
n += 1
name = row[1]["NAME"]
if n > 1:
raise ValueError("raster {} contains more than one county".format(raster))
else:
out_filename = os.path.join(out_directory, name + "_" + year + ".tif")
print(out_filename)
# os.rename(raster, out_filename)
def check_for_missing_rasters(rasters, county_shapefile):
osb = os.path.basename
years = [str(r) for r in range(2000, 2020)]
gdf = gpd.read_file(county_shapefile)
counties = gdf.loc[:, "NAME"]
for year in years:
yearly_rasters = [f for f in rasters if year in osb(f)]
county_rasters = [osb(f)[: osb(f).find("_")] for f in yearly_rasters]
missing = counties[~counties.isin(county_rasters)]
print(missing, len(yearly_rasters), counties.shape[0], year)
def clip_raster_to_shapefiles_gdal(raster_file, shapefiles, out_directory, year):
for f in shapefiles:
out_filename = os.path.join(
out_directory,
os.path.splitext(os.path.basename(f))[0] + "_" + year + ".tif",
)
if not os.path.isfile(out_filename):
print("clipping", raster_file, "to", f, "saving to", out_filename)
cmd = check_output(
[
"gdalwarp",
"-of",
"GTiff",
"-cutline",
f,
"-crop_to_cutline",
raster_file,
out_filename,
]
)
def get_irrigated_statistics(rasters_by_county, csv_out):
county_to_year_and_acres = defaultdict(dict)
for i, raster in enumerate(rasters_by_county):
ss = os.path.splitext((os.path.basename(raster)))[0]
year = ss[-4:]
name = ss[:-5]
county_to_year_and_acres[name][year] = calc_irr_area(raster)
print(i)
irr = pd.DataFrame.from_dict(county_to_year_and_acres)
irr = irr.sort_index() # sort by year
irr = irr.sort_index(axis=1) # and county name
irr.to_csv(csv_out)
def tabulate_flu_data(shapefiles, out_filename):
county_to_year_and_acres = defaultdict(dict)
for shp in shapefiles:
        year = os.path.splitext(os.path.basename(shp))[0][-4:]
flu = gpd.read_file(shp)
for i, county in flu.iterrows():
name = county["NAME"].lower().replace(" ", "_")
area = county["IRR_AREA"]
county_to_year_and_acres[name][year] = area
df = pd.DataFrame.from_dict(county_to_year_and_acres)
df = df.sort_index()
df = df.sort_index(axis=1)
df.to_csv(out_filename)
def calc_irr_area(f, is_binary):
if not isinstance(f, np.ndarray):
with rasterio.open(f, "r") as src:
arr = src.read()
else:
arr = f
if is_binary:
irrigated = arr[arr == 1]
else:
amax = np.argmax(arr, axis=0)
mask = np.sum(arr, axis=0) == 0
amax = amax[~mask]
irrigated = amax[amax == 0]
return irrigated.shape[0] * (30 ** 2) * ACRES_PER_SQUARE_METER
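# Worked unit check (added): one 30 m pixel covers 30 ** 2 = 900 square metres,
# i.e. 900 * 0.000247105 ~ 0.222 acres, so the irrigated-pixel count times that factor
# gives irrigated acreage.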
def convert_to_uint16(files):
for f in files:
print("converting", f)
with rasterio.open(f, "r") as src:
image_stack = src.read()
target_meta = deepcopy(src.meta)
descriptions = src.descriptions
if image_stack.dtype == rasterio.uint16:
print("didn't need to convert", f)
continue
image_stack = image_stack.astype(rasterio.uint16)
target_meta.update({"dtype": rasterio.uint16})
with rasterio.open(f, "w", **target_meta) as dst:
dst.descriptions = descriptions
dst.write(image_stack)
def plotter():
import matplotlib.pyplot as plt
df = pd.read_csv("./montana_irrigated_acreage.csv")
nass = pd.read_csv("./nass_data.csv")
years = [2002, 2007, 2012, 2017]
preds = np.round(df.loc[df.iloc[:, 0].isin(years), :])
fig, ax = plt.subplots(nrows=6, ncols=10)
counties = list(preds.keys())
counties.remove("year")
nass_counties = nass.columns
nass_counties = [
s.replace("MT_", "").lower().replace(" ", "_") for s in nass_counties
]
nass.columns = nass_counties
for i, county in enumerate(counties):
n = nass[county]
p = preds[county]
if i == 55:
ax[i // 10, i % 10].plot(range(4), n, label="nass")
ax[i // 10, i % 10].plot(range(4), p, label="preds")
ax[i // 10, i % 10].axis("off")
ax[i // 10, i % 10].set_title(county)
ax[i // 10, i % 10].legend()
else:
ax[i // 10, i % 10].plot(range(4), n)
ax[i // 10, i % 10].plot(range(4), p)
ax[i // 10, i % 10].set_title(county)
ax[i // 10, i % 10].axis("off")
plt.show()
def plot_df(df, counties=None):
# bin.
if counties == None:
counties = list(df.columns.drop("year"))
years = df.loc[:, "year"].astype(np.int32)
plt.figure(figsize=(18, 15))
for i, county in enumerate(counties):
acreage = df.loc[:, county]
plt.plot(years, acreage, label=county)
plt.plot(years, acreage, "k.")
plt.xticks(years)
plt.ylabel("irrigated acreage")
plt.xlabel("year")
plt.legend()
plt.title("irr. area, selected counties in MT")
def precip_timeseries():
    irr = pd.read_csv("/home/thomas/mt/statistics/irrigated_acreage_cnn_sept28.csv")
import traceback
from .baseblock import Block, ErrorAtRun, RunSpec, EvalLocals
from .inout import Output
from .plugin import dispatch, find_plugin
import pandas as pd
import numpy as np
import logging
import re
class Session:
__sessions = {}
def __new__(cls, name:str):
if name not in Session.__sessions:
logging.info(f'Creating new session {name}')
s = object.__new__(Session)
s.__init(name)
Session.__sessions[name] = s
dispatch(lambda _, obj: getattr(obj, "__new_session")(name) if hasattr(obj, "__new_session") else None)
else:
s = Session.__sessions[name]
return s
def __init(self, name:str) -> None:
self.name = name
self.rootblock:Block = None
self.out:Output = Output()
self.runspec:RunSpec = None
self._result:tuple = None
def _attach(self, ws):
if self.out.ws is not None and self.out.ws != ws:
raise RuntimeError(f'Session is occupied')
self.out.ws = ws
def _detach(self):
self.out.ws = None
def plugin_invoke(self, plugin:str, action:str, **params):
try:
if action.startswith("__"):
self.out.invalid()
return
plugin = find_plugin(plugin)
if plugin is None or not hasattr(plugin, action):
self.out.invalid()
return
action = getattr(plugin, action)
if not callable(action):
self.out.invalid()
return
action(session=self.name, writer=self.out, **params)
except Exception as e:
            logging.error(repr(e))
self.out.error(repr(e))
def run(self, mode:str, upto:str, **kwargs)->None:
try:
if self.rootblock is None:
                raise RuntimeError('No recipe')
self.out.working(f'{mode} upto {upto if upto else "end"}...')
self.runspec = RunSpec(mode=RunSpec.RunMode[mode.upper()], upto=upto, out=self.out)
self._result = self.rootblock(self.runspec, None)
self.out.finished("Finished")
except ErrorAtRun as e:
logging.exception(e.exception, exc_info=True)
self.out.error(repr(e.exception), {"atblock": e.at.name})
except Exception as e:
logging.exception(e, exc_info=True)
self.out.error(repr(e))
def __genstat(self):
data:pd.DataFrame = self._result[0]
result:pd.DataFrame = data.describe(include="all")
def fillunique(x:pd.Series):
if "unique" in x and np.isnan(x["unique"]):
x["unique"] = len(data[x.name].unique())
return x
result = result.transform(fillunique, axis=0)
try:
if self._result[1] is not None:
result.loc[len(result.index)] = data.corrwith(self._result[1])
result = result.rename(index={
len(result.index)-1 : "corr"
})
except TypeError:
pass
result.loc[len(result.index)] = data.median()
result.loc[len(result.index)] = data.skew()
result.loc[len(result.index)] = data.dtypes.astype(str)
result = result.rename(index={
len(result.index)-3 : "median",
len(result.index)-2 : "skew",
len(result.index)-1 : "dtype",
}).transpose().reset_index().rename(columns={"index": "column"})
        return result.where(pd.notnull(result)
import base64
import io
import json
from collections import namedtuple
from datetime import date
from typing import Union
import pandas as pd
from django.db.models import Q
from apps.ecommerce.models import Order
from ..organizations.models import Organization
from ..organizations.permissions import check_user_membership
from .models import Category, Event, SignUp
DEFAULT_REPORT_FIELDS = {
"signup_timestamp",
"event_title",
"user_first_name",
"user_last_name",
"signup_user_grade_year",
"signup_user_email",
"signup_user_phone_number",
"user_allergies",
"attendance_status",
"order_timestamp",
"has_paid",
"order_id",
"order_quantity",
"order_total_price",
}
FiletypeSpec = namedtuple("FiletypeSpec", ["content_type", "extension"])
filetype_specs = {
"xlsx": FiletypeSpec(
content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
extension="xlsx",
),
"csv": FiletypeSpec(content_type="text/csv", extension="csv"),
"html": FiletypeSpec(content_type="text/html", extension="html"),
}
class EventResolvers:
def resolve_all_events(self, info, category=None, organization=None, start_time=None, end_time=None):
"""
Get all events that fit the given filters
"""
if category or organization or start_time or end_time:
filteredEvents = Event.objects
if start_time and end_time:
filteredEvents = filteredEvents.filter(start_time__range=(start_time, end_time))
elif start_time:
filteredEvents = filteredEvents.filter(start_time__gte=(start_time))
elif end_time:
filteredEvents = filteredEvents.filter(start_time__lte=(end_time))
queries = []
kwargs = {}
if category is not None:
kwargs["category__name"] = category
# For generalization, if more filters are added later
new_kwargs = {f"{k}__icontains": v for k, v in kwargs.items()}
queries = [Q(**{k: v}) for k, v in new_kwargs.items()]
if organization: # for organizations, check if the organization argument corresponds to either
queries.append( # the organization of the event itself and the parent organization (if it exists)
Q(organization__name__icontains=organization)
| Q(organization__parent__name__icontains=organization)
)
return (
filteredEvents.filter(*queries)
.filter(start_time__gte=date.today()) # Only show events that have yet to pass
.order_by("start_time")
)
return Event.objects.filter(start_time__gte=date.today()).order_by("start_time")
def resolve_default_events(self, info):
"""
For each organization, get the most recent (future) event
"""
return Event.objects.filter(start_time__gte=date.today()).distinct("organization")
def resolve_event(self, info, id):
try:
return Event.objects.get(id=id)
except Event.DoesNotExist:
return None
def resolve_all_categories(self, info):
return Category.objects.all()
def resolve_category(self, info, id):
try:
return Category.objects.get(id=id)
except Category.DoesNotExist:
return None
def resolve_attendee_report(self, info, event_id, fields=None, filetype="xlsx"):
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
df = create_attendee_report([event_id], fields)
file_basename = f"attendee_report__eventid_{event_id}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_attendee_reports(self, info, event_ids, fields=None, filetype="xlsx"):
for event_id in event_ids:
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
df = create_attendee_report(event_ids, fields)
file_basename = f"attendee_report__eventid_{'|'.join(str(id_) for id_ in event_ids)}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_attendee_report_org(self, info, org_id, fields=None, filetype="xlsx"):
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return None
check_user_membership(info.context.user, org)
event_ids = Organization.objects.get(id=org_id).events.values_list("id", flat=True)
df = create_attendee_report(event_ids, fields)
file_basename = f"attendee_report__orgid_{org_id}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_sign_ups(self, info, event_id):
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
return SignUp.objects.filter(event=event)
def export_single_event(event_id: int, fields: Union[list[str], set[str]]) -> pd.DataFrame:
event: Event = Event.objects.get(pk=event_id)
attending_users = event.signed_up_users[: event.available_slots]
wait_list = event.signed_up_users[event.available_slots :]
sign_ups = SignUp.objects.filter(event_id=event_id, is_attending=True)
df_users = pd.DataFrame(
columns=[
"user_first_name",
"user_last_name",
"user_allergies",
]
)
if attending_users.exists():
df_users_attending = pd.DataFrame(attending_users.values()).set_index("id").add_prefix("user_")
df_users_attending["attendance_status"] = "ATTENDING"
df_users = pd.concat([df_users, df_users_attending])
if wait_list.exists():
df_users_wait_list = pd.DataFrame(wait_list.values()).set_index("id").add_prefix("user_")
df_users_wait_list["attendance_status"] = "WAIT LIST"
df_users = pd.concat([df_users, df_users_wait_list])
if event.products.exists():
product = event.products.first()
orders = Order.objects.filter(product=product)
df_orders = pd.DataFrame(orders.values()).set_index("user_id").add_prefix("order_")
df_users = df_users.join(df_orders)
payment_successful = df_users["order_payment_status"] == Order.PaymentStatus.CAPTURED
df_users["has_paid"] = payment_successful
df_sign_ups = (
pd.DataFrame(sign_ups.order_by("timestamp").values())
.add_prefix("signup_")
.rename(columns={"signup_event_id": "event_id", "signup_user_id": "user_id"})
)
df_joined = df_sign_ups.join(df_users, on="user_id").sort_values(["event_id", "user_id"])
df_joined["event_title"] = event.title
if df_joined.empty:
return pd.DataFrame()
report_fields = list(DEFAULT_REPORT_FIELDS.intersection(df_joined.columns))
fields = set(fields).intersection(report_fields) if fields is not None else report_fields
return df_joined.loc[:, report_fields].drop("password", errors="ignore", axis=1).loc[:, fields]
def create_attendee_report(event_ids, fields):
    df = pd.DataFrame()
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
import re
import util
import os
import xml.etree.ElementTree as ET
import datetime as dt
from scipy.sparse import dok_matrix
import hashlib
import six
from six.moves import range
from six.moves import zip
class XGMML(object):
def __init__(self):
self.T_node=None
self.T_edge=None
self.name="untitled"
def parse(self, s_file):
tree=ET.parse(s_file)
root=tree.getroot()
self.name=root.attrib['label']
c_name={}
nodes=[]
for node in root:
if not node.tag.endswith('node'): continue
id=node.attrib['id']
c_name[id]=node.attrib['label']
c={}
#c['_id']=id
for att in node:
if att.tag.endswith('graphics'):
for k,v in att.attrib.items():
c['graphics_'+k]=v
continue
elif not att.tag.endswith('att'):
continue
v=att.attrib.get('value', None)
ty=att.attrib['type']
if ty=='integer':
v=int(v) if v is not None else 0
elif ty=='real':
v=float(v) if v is not None else 0.0
c[att.attrib['name']]='' if pd.isnull(v) else v
nodes.append(c)
self.T_node=pd.DataFrame(nodes)
        if 'Gene' in self.T_node.columns:
self.T_node['Gene']=self.T_node['Gene'].astype(str)
edges=[]
for edge in root:
if not edge.tag.endswith('edge'): continue
id_A=edge.attrib['source']
id_B=edge.attrib['target']
gene_A=id_A
gene_B=id_B
ty='pp'
if 'label' in edge.attrib:
m=re.search(r'^(\S+)\s+\((\S+)\)\s+(\S+)', edge.attrib['label'])
if m: gene_A, ty, gene_B=m.groups()
c={}
c['Gene_A']=gene_A
c['Name_A']=c_name[id_A]
c['Gene_B']=gene_B
c['Name_B']=c_name[id_B]
c['TYPE']='Direct' if ty=='pp' else 'Indirect'
for att in edge:
if not att.tag.endswith('att'): continue
name=att.attrib['name']
if name in ('canonicalName', 'interaction'): continue
v=att.attrib.get('value', None)
ty=att.attrib['type']
if ty=='integer':
v=int(v) if v is not None else 0
elif ty=='real':
v=float(v) if v is not None else 0.0
                c[name]='' if pd.isnull(v) else v
import numpy as np
import pandas as pd
#T9
train_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv"
train = pd.read_csv(train_url) #training set
test_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/test.csv"
test = pd.read_csv(test_url) #test set
print("printing training head...")
print(train.head())
print("printing training tail...")
print(train.tail())
print("Describing the training data...")
print(train.describe())
#T7
print("The median age of training set is ", train["Age"].median())
#T8
print("The mode of the embarked is", train["Embarked"].mode()[0])
def preProcess(x, train):
x["Age"] = x["Age"].fillna(train["Age"].median())
x.loc[x["Embarked"] == "S", "Embarked"] = 0
x.loc[x["Embarked"] == "C", "Embarked"] = 1
x.loc[x["Embarked"] == "Q", "Embarked"] = 2
x["Embarked"] = x["Embarked"].fillna(train["Embarked"].mode()[0])
x.loc[x["Sex"] == "male", "Sex"] = 0
x.loc[x["Sex"] == "female", "Sex"] = 1
return np.array(x[["Pclass","Sex","Age","Embarked"]].values, dtype = float)
data_train = preProcess(train, train)
data_train=np.concatenate((np.ones((len(data_train),1)), data_train), axis=1)
data_test = preProcess(test, train)
data_test=np.concatenate((np.ones((len(data_test),1)), data_test), axis=1)
lable_train = np.array(train[["Survived"]].values)
#label_test = np.array(test[["Survived"]].values)
def sigmoid(z):
return 1/(1+np.exp(-z))
def z(x, theta):
return x.dot(theta)
def h_log(x,theta):
return sigmoid(z(x,theta))
def h_lin(x, theta):
return x.dot(theta)
def log_loss(y,predicted,eps=1e-15):
predicted = np.clip(predicted, eps, 1 - eps)
return np.sum((-y*np.log(predicted) - (1-y)*np.log(1 - predicted)) / y.shape[0])
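# Worked example (added): for a single positive label y=1 predicted at p=0.9 the loss term is
# -ln(0.9) ~ 0.105, while predicting p=0.1 would cost -ln(0.1) ~ 2.30.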
def mean_square_loss(y, predicted):
error = predicted - y
return np.sum(error*error) / y.shape[0]
def gradient_descent(theta, x, y, r,h):
error = h(x,theta) - y
delta = r*np.matmul(x.T, error)
return theta - delta
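# Note (added): this is the batch update theta <- theta - r * X^T (h(X, theta) - y), the gradient
# step for the cross-entropy loss above (and, up to a constant factor, for the squared error).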
def logistic_train(x, y, epoc, r):
theta = np.random.uniform(-1, 1, (x.shape[1], 1))
losses = []
for i in range(epoc):
predicted = h_log(x,theta)
theta = gradient_descent(theta, x, y, r, h_log)
losses.append(log_loss(y, predicted))
return theta, losses
theta, losses = logistic_train(data_train, lable_train, 3000, 0.00001)
print("last 10 llosses: ", losses[-11:])
train_result = h_log(data_train, theta) > 0.5
predictions = h_log(data_test, theta) > 0.5
predictions = predictions.astype(int)
train_acc = np.count_nonzero(np.equal(lable_train, train_result))/data_train.shape[0]
#test_acc = np.count_nonzero(np.equal(label_test, prediction))/len(data_test.shape[0])
print("The accuracy of the training set is ", train_acc)
#print("The accuracy of the testing set is ", test_acc)
test_result = pd.DataFrame()
test_result['PassengerId'] = test['PassengerId']
test_result['Survived'] = predictions
print("printing result...")
#print(test_result)
test_result.to_csv("titanic_result.csv", index = False)
#OT2
print("begin OT2...")
train = pd.read_csv(train_url)
"""Testing creation and manipulation of DataFrameSchema objects."""
import copy
import numpy as np
import pandas as pd
import pytest
from pandera import (
Column, DataFrameSchema, Index, SeriesSchema, Bool, Category, Check,
DateTime, Float, Int, Object, String, Timedelta, errors)
from pandera.schemas import SeriesSchemaBase
from tests.test_dtypes import TESTABLE_DTYPES
def test_dataframe_schema():
"""Tests the Checking of a DataFrame that has a wide variety of types and
conditions. Tests include: when the Schema works, when a column is dropped,
and when a columns values change its type.
"""
schema = DataFrameSchema(
{
"a": Column(Int,
Check(lambda x: x > 0, element_wise=True)),
"b": Column(Float,
Check(lambda x: 0 <= x <= 10, element_wise=True)),
"c": Column(String,
Check(lambda x: set(x) == {"x", "y", "z"})),
"d": Column(Bool,
Check(lambda x: x.mean() > 0.5)),
"e": Column(Category,
Check(lambda x: set(x) == {"c1", "c2", "c3"})),
"f": Column(Object,
Check(lambda x: x.isin([(1,), (2,), (3,)]))),
"g": Column(DateTime,
Check(lambda x: x >= pd.Timestamp("2015-01-01"),
element_wise=True)),
"i": Column(Timedelta,
Check(lambda x: x < pd.Timedelta(10, unit="D"),
element_wise=True))
})
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9],
"c": ["z", "y", "x"],
"d": [True, True, False],
"e": pd.Series(["c2", "c1", "c3"], dtype="category"),
"f": [(3,), (2,), (1,)],
"g": [pd.Timestamp("2015-02-01"),
pd.Timestamp("2015-02-02"),
pd.Timestamp("2015-02-03")],
"i": [pd.Timedelta(1, unit="D"),
              pd.Timedelta(5, unit="D")
import pandas as pd
import numpy as np
import random
import warnings
from sklearn import preprocessing
# ordinal (low, medium, high)
# nominal (male, female, other)
#
#
#
#
def create_column_combinations(df, col_combinations:list) -> pd.DataFrame:
"""
Create columns with merged values from multiple columns.
col_combinations: list of lists, e.g. [ ['country', 'city'], ['prod_1', 'prod_2', 'prod3]]
"""
new_columns = []
for col_combo in col_combinations:
if isinstance(col_combo, list):
new_column_name = '_'.join(col_combo)
new_columns.append(new_column_name)
print('combining:', col_combo)
            df[new_column_name] = df.loc[:, col_combo].apply(lambda l: '_'.join(l), axis=1)
return df, new_columns
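# Hedged usage sketch (added; column names invented):
#   >>> df, combo_cols = create_column_combinations(df, [['country', 'city']])
# adds a 'country_city' column holding row-wise joined values such as 'DE_Berlin'.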
def create_high_cardinality_bins(df, columns:list, min_count:int = 20, verbose=True) -> pd.DataFrame:
"""
Create new columns with bin-value for high cardinality values, e.g. post codes.
"""
new_columns = []
df['tmp'] = 1
    print('replacing high cardinality categories:')
print(f'{"columns".ljust(52)}| rows < min count ({min_count})')
for col in columns:
new_column_name = f'{col}__min_count_{min_count}'
new_columns.append(new_column_name)
print(f'- {col.ljust(50)}', end='| ')
col_counts = df.groupby(col)['tmp'].transform("count")
df[new_column_name] = np.where(col_counts < min_count, 'OTHER_HIGH_CARDINALITY', df[col])
below_min_count = len(col_counts[col_counts<min_count])
print(str(below_min_count).rjust(14))
df = df.drop('tmp', axis=1)
return df, new_columns
def convert_to_pd_catg(df, columns: list, verbose=True) -> pd.DataFrame:
"""
Converts all columns to pandas categorical type.
Enables additional functions and more memory-efficient data handling.
"""
if verbose: print('converting to categorical:')
for col in columns:
try:
if verbose: print(f'- {col}', end=' ')
df[col] = df[col].astype('category')
if verbose: print('ok')
except:
print(' error')
return df
def create_count_encoding(df, columns:list, scaler:'sklearn.preprocessing. ...' = None,
verbose=True, drop_orig_cols=False) -> pd.DataFrame:
"""
Expects a DataFrame with no missing values in specified columns.
Creates new columns for every column combination (one or more columns to be combined).
:df: DataFrame
:column_combinations: list of single or multiple columns,
eg.: ['country', 'product', ['country', 'product']]
:scaler: sklearn scaler for normalization
:drop_orig_cols: drop original columns after count-encoding
"""
# create temporary column with no missing values, used for counting
df['tmp'] = 1
new_columns = []
if verbose: print('adding categorical counts...')
for col in columns:
# set name suffix for new column
new_column_name = 'ft_' + col + '__count'
if verbose: print(f'- {new_column_name.ljust(60)}', end = ' ')
# groupby count transform
counts = df.groupby(col)['tmp'].transform('count').values.reshape(-1, 1)#.astype(int)
if scaler:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
counts = scaler.fit_transform(counts); # suppress warnings
scaler_str = str(type(scaler)).split('.')[-1].split('Scaler')[0].split('Transformer')[0].lower()
new_column_name = f'{new_column_name}_scaled_{scaler_str}'
df[new_column_name] = counts
if verbose: print('unique', str( df[new_column_name].nunique() ).rjust(5),
'| min', str( df[new_column_name].min() ).rjust(5),
'| max', str( df[new_column_name].max() ).rjust(5))
if drop_orig_cols: df = df.drop(col, axis=1)
new_columns.append(new_column_name)
df = df.drop('tmp', axis=1)
return df, new_columns
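# Hedged usage sketch (added; names invented):
#   >>> from sklearn.preprocessing import MinMaxScaler
#   >>> df, count_cols = create_count_encoding(df, ['country'], scaler=MinMaxScaler())
# adds 'ft_country__count_scaled_minmax' holding the scaled per-category row counts.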
def create_label_encoding(df, columns:list, drop_orig_cols = False, verbose = True):
"""
Add numerical labels for categorical values.
Values under a specified low total count are grouped together as '0'
"""
#max_col_length = len(max(columns, key=len))
new_columns = []
df['tmp'] = 1
if verbose: print('adding label encoding...')
# set name suffix for new column
for col in columns:
new_column_name = 'ft_' + col + '__label'
new_columns.append(new_column_name)
if verbose: print('-', new_column_name.ljust(50), end=' ')
column_values = df[col].copy().values
label_encoder = preprocessing.LabelEncoder()
df[new_column_name] = label_encoder.fit_transform(column_values)
if verbose: print('unique:', str(df[new_column_name].nunique()).ljust(7))
if drop_orig_cols: df = df.drop(col, axis=1)
df = df.drop('tmp', axis=1)
return df, new_columns
def create_one_hot_encoding(df, columns: list, min_pctg_to_keep=0.03, return_new_cols=True, verbose=True):
"""
Adds one-hot encoded columns for each categorical column
"""
max_col_length = len(max(columns, key=len))
new_columns = []
print('creating one-hot columns:')
for column in columns:
#new_columns = [column + "_" + i for i in full[column].unique()] #only use the columns that appear in the test set and add prefix like in get_dummies
if verbose: print('-', column.ljust(max_col_length), end=' ')
if df[column].nunique() > 500:
print('too many unique values', df[column].nunique())
else:
one_hot_df = pd.get_dummies(df[column], prefix=f'ft_{column}__one_hot_')
orig_col_number = len(one_hot_df.columns)
keep_cols = (one_hot_df.sum()/len(one_hot_df))>=min_pctg_to_keep
one_hot_df = one_hot_df.loc[:, keep_cols]
if verbose: print(f'keep {len(one_hot_df.columns)}/{orig_col_number} one-hot columns')
# drop columns if they already exist, in case function is called twice
df = df.drop(one_hot_df.columns, axis=1, errors='ignore')
df = pd.concat((df, one_hot_df), axis = 1)
new_columns.extend(list(one_hot_df.columns))
new_columns = list(set(new_columns))
return df, new_columns
def target_encode_smooth_mean(df, catg_columns:list, target_col:str, train_index,
smoothing_factor=3, std_noise_factor=0.01, verbose=True):
"""
Add smoothed mean target encoding.
"""
max_col_length = len(max(catg_columns, key=len))
# Compute the global mean
train_mean = df['target'].mean()
print('global mean:', train_mean)
for col in catg_columns:
# Compute the number of values and the mean of each group for train data only
grouped = df.loc[train_index, :].groupby(col)['target'].agg(['count', 'mean', 'std'])
counts, means, stds = grouped['count'], grouped['mean'], grouped['std']
# Compute the smoothed means
smooth_mean = (counts*means + smoothing_factor*train_mean) / (counts + smoothing_factor)
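        # Worked example (added, illustrative numbers): for a group with count=4 and mean=0.75,
        # smoothing_factor=3 and a global mean of 0.5, the encoding is
        # (4 * 0.75 + 3 * 0.5) / (4 + 3) = 4.5 / 7 ~ 0.643, i.e. pulled toward the global mean.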
if isinstance(col, str):
new_column_name = f'ft_{col}__target_enc_mean_smooth{smoothing_factor}'
df[new_column_name] = df[col].map(smooth_mean)
# Add noise
if std_noise_factor is not None:
# add column with scaled standard deviation
df['tmp_stds_with_noise'] = df[col].map(stds)*std_noise_factor
elif isinstance(col, list):
col_str = '_'.join(col)
new_column_name = f'ft_{col_str}__target_enc_mean_smooth{smoothing_factor}'
# remove column if already exist from previous execution of same function to prevent merge-duplicates
df = df.drop(new_column_name, axis=1, errors='ignore')
smooth_mean_df = pd.DataFrame(smooth_mean).reset_index().rename(columns={0:new_column_name})
            df = pd.merge(df, smooth_mean_df, how='left', on=col)
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = | Timedelta('7 days 1 hour') | pandas.Timedelta |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = | offsets.BDay() | pandas.offsets.BDay |
"""
Agentpy Output Module
Content: DataDict class for output data
"""
import pandas as pd
import os
from os import listdir, makedirs
from os.path import getmtime, join
from SALib.analyze import sobol
from .tools import AttrDict, make_list, AgentpyError
import json
import numpy as np
class NpEncoder(json.JSONEncoder):
""" Adds support for numpy number formats to json. """
# By <NAME> https://stackoverflow.com/a/57915246
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
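# Usage sketch: NpEncoder is meant to be passed as `cls` to json.dumps/json.dump
# so numpy scalars and arrays serialize cleanly (the values below are illustrative).
# >>> json.dumps({'mean': np.float64(0.5), 'ids': np.arange(3)}, cls=NpEncoder)
# '{"mean": 0.5, "ids": [0, 1, 2]}'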
def _last_exp_id(name, path):
""" Identifies existing experiment data and return highest id. """
exp_id = 0
output_dirs = listdir(path)
exp_dirs = [s for s in output_dirs if name in s]
if exp_dirs:
ids = [int(s.split('_')[-1]) for s in exp_dirs]
exp_id = max(ids)
return exp_id
# TODO Create DataSubDict without methods
class DataDict(AttrDict):
""" Nested dictionary for output data of simulations.
Items can be accessed like attributes.
Attributes can differ from the standard ones listed below.
Attributes:
info (dict):
Metadata of the simulation.
parameters (DataDict):
Simulation parameters.
variables (DataDict):
Recorded variables, separatedper object type.
reporters (pandas.DataFrame):
Reported outcomes of the simulation.
sensitivity (DataDict):
Sensitivity data, if calculated.
"""
def __repr__(self, indent=False):
rep = ""
if not indent:
rep += "DataDict {"
i = ' ' if indent else ''
for k, v in self.items():
rep += f"\n{i}'{k}': "
if isinstance(v, (int, float, np.integer, np.floating)):
rep += f"{v} {type(v)}"
elif isinstance(v, str):
x0 = f"(length {len(v)})"
x = f"...' {x0}" if len(v) > 20 else "'"
rep += f"'{v[:30]}{x} {type(v)}"
elif isinstance(v, pd.DataFrame):
lv = len(list(v.columns))
rv = len(list(v.index))
rep += f"DataFrame with {lv} " \
f"variable{'s' if lv != 1 else ''} " \
f"and {rv} row{'s' if rv != 1 else ''}"
elif isinstance(v, DataDict):
rep += f"{v.__repr__(indent=True)}"
elif isinstance(v, dict):
lv = len(list(v.keys()))
rep += f"Dictionary with {lv} key{'s' if lv != 1 else ''}"
elif isinstance(v, list):
lv = len(v)
rep += f"List with {lv} entr{'ies' if lv != 1 else 'y'}"
else:
rep += f"Object of type {type(v)}"
if not indent:
rep += "\n}"
return rep
def _short_repr(self):
len_ = len(self.keys())
return f"DataDict {{{len_} entr{'y' if len_ == 1 else 'ies'}}}"
def __eq__(self, other):
""" Check equivalence of two DataDicts."""
if not isinstance(other, DataDict):
return False
for key, item in self.items():
if key not in other:
return False
if isinstance(item, pd.DataFrame):
if not self[key].equals(other[key]):
return False
elif not self[key] == other[key]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# Data analysis --------------------------------------------------------- #
@staticmethod
def _sobol_set_df_index(df, p_keys, reporter):
df['parameter'] = p_keys
df['reporter'] = reporter
df.set_index(['reporter', 'parameter'], inplace=True)
def calc_sobol(self, reporters=None, **kwargs):
""" Calculates Sobol Sensitivity Indices
using :func:`SALib.analyze.sobol.analyze`.
Data must be from an :class:`Experiment` with a :class:`Sample`
that was generated with the method 'saltelli'.
If the experiment had more than one iteration,
the mean value between iterations will be taken.
Arguments:
reporters (str or list of str, optional): The reporters that should
be used for the analysis. If none are passed, all are used.
**kwargs: Will be forwarded to :func:`SALib.analyze.sobol.analyze`.
Returns:
DataDict: The DataDict itself with an added category 'sensitivity'.
"""
if not self.parameters.log['type'] == 'saltelli':
raise AgentpyError("Sampling method must be 'saltelli'.")
if self.info['iterations'] == 1:
reporters_df = self.reporters
else:
reporters_df = self.reporters.groupby('sample_id').mean()
# STEP 1 - Load salib problem from parameter log
param_ranges_salib = self.parameters.log['salib_problem']
calc_second_order = self.parameters.log['calc_second_order']
# STEP 2 - Calculate Sobol Sensitivity Indices
if reporters is None:
reporters = reporters_df.columns
if isinstance(reporters, str):
reporters = [reporters]
p_keys = self._combine_pars(sample=True, constants=False).keys()
dfs_list = [[] for _ in range(4 if calc_second_order else 2)]
for reporter in reporters:
y = np.array(reporters_df[reporter])
si = sobol.analyze(param_ranges_salib, y, calc_second_order, **kwargs)
# Make dataframes out of S1 and ST sensitivities
keyss = [['S1', 'ST'], ['S1_conf', 'ST_conf']]
for keys, dfs in zip(keyss, dfs_list[0:2]):
s = {k[0:2]: v for k, v in si.items() if k in keys}
df = pd.DataFrame(s)
self._sobol_set_df_index(df, p_keys, reporter)
dfs.append(df)
# Make dataframes out S2 sensitivities
if calc_second_order:
for key, dfs in zip(['S2', 'S2_conf'], dfs_list[2:4]):
df = pd.DataFrame(si[key])
self._sobol_set_df_index(df, p_keys, reporter)
dfs.append(df)
# Combine dataframes for each reporter
self['sensitivity'] = sdict = DataDict()
sdict['sobol'] = pd.concat(dfs_list[0])
sdict['sobol_conf'] = pd.concat(dfs_list[1])
if calc_second_order:
# Add Second-Order to self
dfs_si = [sdict['sobol'], pd.concat(dfs_list[2])]
dfs_si_conf = [sdict['sobol_conf'], pd.concat(dfs_list[3])]
sdict['sobol'] = pd.concat(dfs_si, axis=1)
sdict['sobol_conf'] = pd.concat(dfs_si_conf, axis=1)
# Create Multi-Index for Columns
arrays = [["S1", "ST"] + ["S2"] * len(p_keys), [""] * 2 + list(p_keys)]
tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples, names=["order", "parameter"])
sdict['sobol'].columns = index
sdict['sobol_conf'].columns = index.copy()
return self
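# Usage sketch (hedged): assuming `results` holds the output of an experiment run
# with a Saltelli sample and 'my_reporter' is a hypothetical reporter name:
# >>> results.calc_sobol(reporters='my_reporter')
# >>> results.sensitivity.sobol       # first/total (and second) order indices
# >>> results.sensitivity.sobol_conf  # corresponding confidence intervals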
# Data arrangement ------------------------------------------------------ #
def _combine_vars(self, obj_types=True, var_keys=True):
""" Returns pandas dataframe with combined variables """
# Retrieve variables
if 'variables' in self:
vs = self['variables']
else:
return None
if len(vs.keys()) == 1:
return list(vs.values())[0] # Return df if vs has only one entry
elif isinstance(vs, DataDict):
df_dict = dict(vs) # Convert to dict if vs is DataDict
# Remove dataframes that don't include any of the selected var_keys
if var_keys is not True:
df_dict = {k: v for k, v in df_dict.items()
if any(x in v.columns for x in make_list(var_keys))}
# Select object types
if obj_types is not True:
df_dict = {k: v for k, v in df_dict.items()
if k in make_list(obj_types)}
# Add 'obj_id' before 't' for model df
model_type = self.info['model_type']
if model_type in list(df_dict.keys()):
df = df_dict[model_type]
df['obj_id'] = 0
indexes = list(df.index.names)
indexes.insert(-1, 'obj_id')
df = df.reset_index()
df = df.set_index(indexes)
df_dict[model_type] = df
# Return none if empty
if df_dict == {}:
return None
# Create dataframe
df = pd.concat(df_dict) # Dict keys (obj_type) will be added to index
df.index = df.index.set_names('obj_type', level=0) # Rename new index
# Select var_keys
if var_keys is not True:
# make_list prevents conversion to pd.Series for single value
df = df[make_list(var_keys)]
return df
def _dict_pars_to_df(self, dict_pars):
n = self.info['sample_size'] if 'sample_size' in self.info else 1
d = {k: [v] * n for k, v in dict_pars.items()}
i = pd.Index(list(range(n)), name='sample_id')
return pd.DataFrame(d, index=i)
def _combine_pars(self, sample=True, constants=True):
""" Returns pandas dataframe with parameters and sample_id """
# Cancel if there are no parameters
if 'parameters' not in self:
return None
dfp = pd.DataFrame()
if sample and 'sample' in self.parameters:
dfp = self.parameters.sample.copy()
if constants and 'constants' in self.parameters:
for k, v in self.parameters.constants.items():
dfp[k] = v
elif constants and 'constants' in self.parameters:
dfp = self._dict_pars_to_df(self.parameters.constants)
# Cancel if no parameters have been selected
if dfp is None or dfp.empty is True:
return None
return dfp
def arrange(self, variables=False, reporters=False, parameters=False,
constants=False, obj_types=True, index=False):
""" Combines and/or filters data based on passed arguments.
Arguments:
variables (bool or str or list of str, optional):
Key or list of keys of variables to include in the dataframe.
If True, all available variables are selected.
If False (default), no variables are selected.
reporters (bool or str or list of str, optional):
Key or list of keys of reporters to include in the dataframe.
If True, all available reporters are selected.
If False (default), no reporters are selected.
parameters (bool or str or list of str, optional):
Key or list of keys of parameters to include in the dataframe.
If True, all non-constant parameters are selected.
If False (default), no parameters are selected.
constants (bool, optional):
Include constants if 'parameters' is True (default False).
obj_types (str or list of str, optional):
Agent and/or environment types to include in the dataframe.
If True (default), all objects are selected.
If False, no objects are selected.
index (bool, optional):
Whether to keep original multi-index structure (default False).
Returns:
pandas.DataFrame: The newly arranged dataframe.
"""
dfv = dfm = dfp = df = None
# Step 1: Variables
if variables is not False:
dfv = self._combine_vars(obj_types, variables)
# Step 2: Measures
if reporters is not False:
dfm = self.reporters
if reporters is not True: # Select reporter keys
# make_list prevents conversion to pd.Series for single value
dfm = dfm[make_list(reporters)]
# Step 3: Parameters
if parameters is True:
dfp = self._combine_pars(constants=constants)
elif parameters is not False:
dfp = self._combine_pars()
dfp = dfp[make_list(parameters)]
# Step 4: Combine dataframes
if dfv is not None and dfm is not None:
# Combine variables & measures
index_keys = dfv.index.names
dfm = dfm.reset_index()
dfv = dfv.reset_index()
df = pd.concat([dfm, dfv])
df = df.set_index(index_keys)
elif dfv is not None:
df = dfv
elif dfm is not None:
df = dfm
if dfp is not None:
if df is None:
df = dfp
else: # Combine df with parameters
if df is not None and isinstance(df.index, pd.MultiIndex):
dfp = dfp.reindex(df.index, level='sample_id')
df = pd.concat([df, dfp], axis=1)
if df is None:
return pd.DataFrame()
# Step 5: Reset index
if not index:
df = df.reset_index()
return df
def arrange_reporters(self):
""" Common use case of :obj:`DataDict.arrange`
with `reporters=True` and `parameters=True`. """
return self.arrange(variables=False, reporters=True, parameters=True)
def arrange_variables(self):
""" Common use case of :obj:`DataDict.arrange`
with `variables=True` and `parameters=True`. """
return self.arrange(variables=True, reporters=False, parameters=True)
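# Usage sketch (illustrative): `arrange` flattens the selected data into one
# pandas.DataFrame; 'wealth' is a hypothetical recorded variable key.
# >>> df = results.arrange(variables='wealth', parameters=True)
# The two wrappers above are shorthands for arrange(reporters=True,
# parameters=True) and arrange(variables=True, parameters=True).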
# Saving and loading data ----------------------------------------------- #
def save(self, exp_name=None, exp_id=None, path='ap_output', display=True):
""" Writes data to directory `{path}/{exp_name}_{exp_id}/`.
Works only for entries that are of type :class:`DataDict`,
:class:`pandas.DataFrame`, or serializable with JSON
(int, float, str, dict, list). Numpy objects will be converted
to standard objects, if possible.
Arguments:
exp_name (str, optional): Name of the experiment to be saved.
If none is passed, `self.info['model_type']` is used.
exp_id (int, optional): Number of the experiment.
Note that passing an existing id can overwrite existing data.
If none is passed, a new id is generated.
path (str, optional): Target directory (default 'ap_output').
display (bool, optional): Display saving progress (default True).
"""
# Create output directory if it doesn't exist
if path not in listdir():
makedirs(path)
# Set exp_name
if exp_name is None:
if 'info' in self and 'model_type' in self.info:
exp_name = self.info['model_type']
else:
exp_name = 'Unnamed'
exp_name = exp_name.replace(" ", "_")
# Set exp_id
if exp_id is None:
exp_id = _last_exp_id(exp_name, path) + 1
# Create new directory for output
path = f'{path}/{exp_name}_{exp_id}'
makedirs(path)
# Save experiment data
for key, output in self.items():
if isinstance(output, pd.DataFrame):
output.to_csv(f'{path}/{key}.csv')
elif isinstance(output, DataDict):
for k, o in output.items():
if isinstance(o, pd.DataFrame):
o.to_csv(f'{path}/{key}_{k}.csv')
elif isinstance(o, dict):
with open(f'{path}/{key}_{k}.json', 'w') as fp:
json.dump(o, fp, cls=NpEncoder)
else: # Use JSON for other object types
try:
with open(f'{path}/{key}.json', 'w') as fp:
json.dump(output, fp, cls=NpEncoder)
except TypeError as e:
print(f"Warning: Object '{key}' could not be saved. "
f"(Reason: {e})")
os.remove(f'{path}/{key}.json')
# TODO Support grids & graphs
# elif t == nx.Graph:
# nx.write_graphml(output, f'{path}/{key}.graphml')
if display:
print(f"Data saved to {path}")
def _load(self, exp_name=None, exp_id=None,
path='ap_output', display=True):
def load_file(path, file, display):
if display:
print(f'Loading {file} - ', end='')
i_cols = ['sample_id', 'iteration', 'obj_id', 't']
ext = file.split(".")[-1]
path = path + file
try:
if ext == 'csv':
obj = | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 20:06:21 2019
@author: rachellu
"""
import pandas as pd
#data02 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/PickJob_02.csv")
#data03 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/PickJob_03.csv")
#data04 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/PickJob_04.csv")
#data05 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/05.csv")
#data06 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/06.csv")
#data07 = pd.read_csv("/Users/rachellu/Dropbox/capstone/pickjob/07.csv")
data02 = | pd.read_csv("/Users/rachellu/Dropbox/capstone/pickorder/02.csv") | pandas.read_csv |
import yfinance as yf
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import keras
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Dropout, LSTM, Input, Activation, concatenate
from keras import optimizers
from keras.models import Sequential
from keras.utils import plot_model
from keras.models import load_model
# Yahoo Finance Library for getting historical stock data from Yahoo finance
import os.path, time
import datetime
def get_stock_data(stock_list,start_date,end_date):
'''
Download stock data from Yahoo Finance as listed in stock_list
starting from start date to end date and save to CSV file.
The default path is the same path as the running script.
Input:
- stock_list (list of String) : List of Yahoo Finance ticker symbols, e.g. ['stockA', 'stockB', 'stockC'].
- start_date (String) : Start date string in a format accepted by yfinance, e.g. 'YYYY-MM-DD'.
- end_date (String) : End date string in a format accepted by yfinance, e.g. 'YYYY-MM-DD'.
Output:
- No return value
- The csv file will be written with the naming convention as ticker.csv
'''
for stock in stock_list:
try:
file = stock+'.csv'
print('Downloading data from Yahoo Finance...')
data = yf.download(stock, start=start_date, end=end_date)
pd.DataFrame(data).to_csv(file)
except Exception as e:
print("exception "+str(e)+"on "+stock)
print("start date = "+start_date)
print("end date = "+end_date)
def dataset_preparation(filename, history_points, predict_range, y_normaliser, mode='file', df=None):
'''
This function will prepare data and make it ready for training and testing the model by receiving the CSV file path or dataframe
equivalent to Yahoo Finance's historical data with other parameters, normalize to 0-1 range value, separate into train and test and return the result.
Input:
- filename (String) : The file path for csv file containing historical stock data downloaded from Yahoo Finance.
- history_points (Number) : The number of day range for historical data to be used for training the model.
- predict_range (Number) : The range of day to forecast the price.
- y_normaliser (preprocessing.MinMaxScaler Object) : Preprocessor for normalizing the forecast prices to the 0-1 range. We need it so that we can later scale the predicted data back to actual values.
- df (DataFrame) : The dataframe input of the dataset. If the filename is passed and the mode is set to 'file' it will be ignored.
- mode : If it is 'file' the function will ignore df, otherwise it will assume that df is provided for preparation. This is used in the case that the data is read from CSV and appended with other data features.
Output:
- ohlcv_histories_normalised (Array) : Array of the data features. One row consist of [day-1-open,day-1-max,day-1-min,...day-history_point ].
- next_day_adjclose_values_normalised (Array) : Array of normalised Adj Close values transformed in the format of [day1-adj close,day2-adj close....day-predict_range adj close].
- next_day_adjclose_values (Array) : Array of actual Adj Close values transformed in the format of [day1-adj close,day2-adj close....day-predict_range adj close].
- y_normaliser (preprocessing.MinMaxScaler Object) : After we fit the actual value, we return it back so that it can be used again to scale the normalized result.
'''
# Prepare data per mode - file or dataframe input
if mode=='file':
# If it is file mode the function expect CSV file path to read the data
df = pd.read_csv(filename)
# For both modes, we will drop rows with null values as we can't use them anyway
df_na = df.dropna(axis=0)
# Drop Date as this is time series data, Date isn't used. Also drop Close as we will predict Adj Close.
df_na = df_na.drop(['Date','Close'],axis=1)
# Normalise all data to the 0-1 value range, as neural network algorithms perform better with data in this range
data_normaliser = preprocessing.MinMaxScaler()
data_normalised = data_normaliser.fit_transform(df_na)
# Prepare the data in the format of [day-1-open,day-1-max,day-1-min,...day-history_point ] as 1 row input for predict the 'predict_range' price for train and test
ohlcv_histories_normalised = np.array([data_normalised[i : i + history_points].copy() for i in range(len(data_normalised) - history_points - predict_range +1)])
# Get the actual price [day1-adj close,day2-adj close....day-predict_range adj close] for train and test
next_day_adjclose_values_normalised = np.array([data_normalised[i + history_points:i + history_points + predict_range,3].copy() for i in range(len(data_normalised) - history_points - predict_range+1)])
# Create the same array as the normalised adj close but with the actual value not the scaled down value. This is used to calculate the prediction accuracy
next_day_adjclose_values = np.array([df_na.iloc[i + history_points:i + history_points+predict_range]['Adj Close'].values.copy() for i in range(len(df_na) - history_points - predict_range+1)])
# Use the passed normaliser to fit the actual value so that we can scale the predicted result back to actual value
y_normaliser.fit(next_day_adjclose_values)
return ohlcv_histories_normalised, next_day_adjclose_values_normalised, next_day_adjclose_values, y_normaliser
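# Usage sketch (hedged): 'AAPL.csv' is an assumed Yahoo Finance export.
# >>> y_scaler = preprocessing.MinMaxScaler()
# >>> X, y, y_raw, y_scaler = dataset_preparation(
# ...     'AAPL.csv', history_points=50, predict_range=1, y_normaliser=y_scaler)
# X has shape (samples, 50, n_features); y and y_raw have shape (samples, 1).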
def get_LSTM_Model(layer_num, history_points, features_num,predict_range,optimizer,dropout_prob):
'''
This function will build LSTM model per provided parameters.
The model will be a simple one, consisting of a single LSTM layer with a configurable number of units, followed by a dropout (forget) layer with a configurable forget probability.
Input:
- layer_num (Number) : The number of LSTM units in the hidden layer.
- history_points (Number) : The number of time steps (days of history) in each input sample.
- features_num (Number) : The number of features in the dataset.
- predict_range (Number) : The number of day to predict the stock price.
- optimizer (Number) : The optimizer's name e.g. adam.
- dropout_prob (Float) : Probability to forget the date on dropout layer.
Output:
- model (Object) : The compiled LSTM model per the provided parameters.
'''
# Initialize LSTM using Keras library
model = Sequential()
# Define the LSTM layer with `layer_num` units and the input shape (history length and number of features)
model.add(LSTM(layer_num, input_shape=(history_points, features_num)))
# Add forget (dropout) layer with probability per argument
model.add(Dropout(dropout_prob))
# End the network with a dense output layer sized to the forecast range, e.g. 1, 5 or 10 days
model.add(Dense(predict_range))
# Build and return the model per the selected optimizer
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
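# Usage sketch: build a model for 50 days of history, 5 input features, a
# 1-day forecast, the 'adam' optimizer and 20% dropout (values are illustrative).
# >>> model = get_LSTM_Model(layer_num=64, history_points=50, features_num=5,
# ...                        predict_range=1, optimizer='adam', dropout_prob=0.2)
# >>> model.summary()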
def train_test_split(ohlcv_histories, next_day_adj_close, unscaled_y,test_split = 0.9):
'''
Split the dataset to train and test dataset per provideed ratio.
Input
- ohlcv_histories (Array) : The dataset in array.
- next_day_adj_close (Array) : The result of prediction using the dataset in array.
- unscaled_y (Array) : The same data as next_day_adj_close but not normalize to 0-1.
- test_split (Float) : The ratio of train per test.
Output
- ohlcv_train (Array) : The train dataset splitted per test_split ratio.
- ohlcv_test (Array) : The test dataset splitted per test_split ratio.
- y_test (Array) : The result of test dataset splitted per test_split ratio.
- y_train (Array) : The result of train dataset splitted per test_split ratio.
- unscaled_y_test (Array) : The unscaled y_test per test_split ratio.
'''
n = int(ohlcv_histories.shape[0] * test_split)
ohlcv_train = ohlcv_histories[:n]
y_train = next_day_adj_close[:n]
ohlcv_test = ohlcv_histories[n:]
y_test = next_day_adj_close[n:]
unscaled_y_test = unscaled_y[n:]
return ohlcv_train, ohlcv_test, y_test, y_train, unscaled_y_test
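# Usage sketch (assumes the arrays produced by dataset_preparation above): with
# test_split=0.9 the first 90% of windows become the training set and the remaining
# 10% the test set, e.g. 1000 windows -> 900 train / 100 test.
# ohlcv_train, ohlcv_test, y_test, y_train, unscaled_y_test = train_test_split(
#     ohlcv_histories, next_day_adj_close, unscaled_y, test_split=0.9)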
def train_predictor(ohlcv_train,y_train,ohlcv_test,y_normaliser,unscaled_y_test,hidden_layer,batch_size,epoch,dropout_probability,history_points,features_num,predict_range):
'''
Create an LSTM model per the provided parameters, fit the training data and validate its accuracy using MSE.
Finally, return the MSE result and the model object.
Input
- ohlcv_train (Array) : Train dataset in array.
- y_train (Array) : Train dataset result in array.
- ohlcv_test (Array) : Test dataset in array.
- y_normaliser (preprocessing.MinMaxScaler Object) : The normaliser instance that is used to scale down y_test. We will use it to scale up the result from test dataset.
- unscaled_y_test (Array) : The unscaled y_test using for validate the result.
- hidden_layer (Number) : LSTM parameter's number of LSTM units in the hidden layer.
- batch_size (Number) : LSTM parameter's number of batch size.
- epoch (Number) : LSTM parameter's number of epoch.
- dropout_probability (Float) : LSTM parameter's dropout probability.
- history_points (Number) : LSTM parameter's number of history points used to train the model in each sample.
- features_num (Number) : LSTM parameter's number of features in the dataset.
- predict_range (Number) : LSTM parameter's number of days to be predicted.
Output
- model (Object) : LSTM model which can be saved to h5 or use to predict the result with the new dataset.
- scaled_mse (Float) : Mean squared error between the unscaled test predictions and unscaled_y_test, expressed as a percentage of the test-set price range.
'''
model = get_LSTM_Model(hidden_layer,history_points,features_num,predict_range,'adam',dropout_probability)
model.fit(x=ohlcv_train, y=y_train, batch_size=batch_size, epochs=epoch, shuffle=True, validation_split=0.1,verbose=0)
y_test_predicted = model.predict(ohlcv_test)
y_test_predicted = y_normaliser.inverse_transform(y_test_predicted)
real_mse = np.mean(np.square(unscaled_y_test - y_test_predicted))
scaled_mse = real_mse / (np.max(unscaled_y_test) - np.min(unscaled_y_test)) * 100
return model, scaled_mse
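# Note on the error metric (a short worked example): scaled_mse expresses the raw
# MSE as a percentage of the test-set price range, e.g. real_mse = 4.0 on prices
# spanning np.max - np.min = 20.0 gives scaled_mse = 4.0 / 20.0 * 100 = 20.0.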
def train_and_validate_stock_predictor(stock,history_points,predict_range,hidden_layer,batch_size,epoch,dropout_probability, mode='file'):
'''
Encapsulate all activities needed to create an LSTM model that predicts the stock with the provided parameters.
There are two modes for this function: mode='file' reads data from the csv file without adding extra features such as MACD and EMA,
while any other mode reads data from the csv file and also adds MACD and EMA as additional features.
The steps are: transform the data, split into train/test, build and fit the model, evaluate the model accuracy and return the result.
Input
- stock (String) : Ticker per Yahoo Finance.
- history_points (Number) : LSTM parameter's the number of history data to train the model in each iteration.
- predict_range (Number) : LSTM parameter's number of days to be predicted.
- hidden_layer (Number) : LSTM parameter's number of hidden layer.
- batch_size (Number) : LSTM parameter's number of batch size.
- epoch (Number) : LSTM parameter's number of epoch.
- dropout_probability (Float) : LSTM parameter's dropout probability.
- mode (String) : 'file' to use the raw csv columns only; any other value also adds MACD and EMA as features.
Output
- model (Object) : LSTM model which can be saved to h5 or use to predict the result with the new dataset.
- scaled_mse (Float) : Mean squared error between the unscaled test predictions and unscaled_y_test, expressed as a percentage of the test-set price range.
'''
# Read the data; MACD and EMA are added only when mode is not 'file'
if mode=='file':
features_num = 5
ohlcv_histories, next_day_adj_close, unscaled_y, y_normaliser = dataset_preparation(stock+'.csv',history_points,predict_range,preprocessing.MinMaxScaler())
else:
df = add_macd_ema(pd.read_csv(stock+'.csv'))
ohlcv_histories, next_day_adj_close, unscaled_y, y_normaliser = dataset_preparation('',history_points,predict_range,preprocessing.MinMaxScaler(),mode='df',df=df)
features_num = len(df.columns)-2
ohlcv_train, ohlcv_test, y_test, y_train, unscaled_y_test = train_test_split(ohlcv_histories,next_day_adj_close,unscaled_y)
model, scaled_mse = train_predictor(ohlcv_train,y_train,ohlcv_test,y_normaliser,unscaled_y_test,hidden_layer,batch_size,epoch,dropout_probability,history_points,features_num,predict_range)
return model, scaled_mse
def add_macd_ema(df,ema1=20,ema2=50):
'''
Compute the stock technical analysis indicators (MACD and EMA) and add them back to the dataset
Input
- df (DataFrame) : The DataFrame of stock data as downloaded from Yahoo Finance
- ema1 (Number) : The first EMA period to add to the dataset
- ema2 (Number) : The second EMA period to add to the dataset
Output
- df (DataFrame) : The DataFrame with new columns added: MACD, ema1 and ema2 (e.g. MACD, 20 and 50 are the default column names that will be added)
'''
df_close = df[['Close']]
df_close.reset_index(level=0, inplace=True)
df_close.columns=['ds','y']
exp1 = df_close.y.ewm(span=12, adjust=False).mean()
exp2 = df_close.y.ewm(span=26, adjust=False).mean()
macd = exp1-exp2
df = pd.merge(df,macd,how='left',left_on=None, right_on=None, left_index=True, right_index=True)
df.columns = ['Date','Open','High','Low','Close','Adj Close','Volume','MACD']
df[ema1] = df['Close'].ewm(span=ema1, adjust=False).mean()
df[ema2] = df['Close'].ewm(span=ema2, adjust=False).mean()
return df
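# Usage sketch (hypothetical file name; assumes a Yahoo Finance CSV with the standard
# Date/Open/High/Low/Close/Adj Close/Volume columns):
# df_with_indicators = add_macd_ema(pd.read_csv('PTT.BK.csv'), ema1=20, ema2=50)
# df_with_indicators[['Close', 'MACD', 20, 50]].tail()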
def display_test_validation_graph(unscaled_y_test, y_test_predicted):
'''
Display the plot of the stock price in test dataset and the predicted data.
Input
- unscaled_y_test (Array) : The array of stock price in test dataset.
- y_test_predicted (Array) : The array of stock price as predicted.
Output
- No return value.
- The plot will be displayed on the screen.
'''
plt.gcf().set_size_inches(22, 15, forward=True)
start = 0
end = -1
real = plt.plot(unscaled_y_test[start:end], label='real')
pred = plt.plot(y_test_predicted[start:end], label='predicted')
plt.legend(['Real', 'Predicted'])
plt.show()
def train_model(stock_list, start_date, end_date):
'''
Entry point for the user: provide a stock list plus the start and end date of the data used to train the prediction models.
The function calls other functions to download data from Yahoo Finance for the specified start and end date.
It then prepares the data and builds LSTM models for 1, 5 and 10 day stock price prediction using a set of parameters
that was tuned to give good results for SET50, and writes all 3 models (per stock) to h5 files along with the list of MSEs of each model in the same path as the script.
Input
- stock_list (String) : List of ticker in space delimited format e.g. tickerA tickerB tickerC.
- start_date (String) : The string of start date in format DD/MM/YYYY.
- end_date (String) : The string of end date in format DD/MM/YYYY.
Output
- No return value.
- Print the model training progress on the screen.
'''
# Split the stock_list to array
stock_list = stock_list.split(' ')
# Convert string to datetime object
start_date = datetime.datetime.strptime(start_date,"%d/%m/%Y")
end_date = datetime.datetime.strptime(end_date,"%d/%m/%Y")
try:
get_stock_data(stock_list, start_date, end_date)
except Exception as e:
print("exception "+str(e)+"on "+stock_list)
# Array for recording MSE from each round of training
mse_list = []
# Train model to predict 1, 5 and 10 days using the best parameters from SET50 as found
# Save the trained model to h5 format to be used by query price function
for stock in stock_list:
print('start model training for stock = '+stock+'. It may take at least 5 minutes...')
# Train model for 1 day predict range by using parameters that we found from our study earlier
# Set up parameters for the model
predict_range = 1
history_points = 90
hidden_layer = 10
batch_size = 10
epoch = 90
dropout_probability = 1.0
mode = 'file'
try:
# Train the model and get the MSE result
model, scaled_mse = train_and_validate_stock_predictor(stock,history_points,predict_range,hidden_layer,batch_size,epoch,dropout_probability,mode)
# Add stock, range of prediction and MSE result to the list
mse_list.append([stock, predict_range, scaled_mse])
# Save the trained model to the runtime
model.save(stock+'_'+str(predict_range)+'.h5')
except Exception as e:
print("exception "+str(e)+"on "+stock)
pd.DataFrame(columns=['predict rage','stock','exception'],data=[predict_range,stock,str(e)]).to_csv('exception.csv')
continue
# Train model for 5 days predict range by using parameters that we found from our study earlier
# Set up parameters for the model
predict_range = 5
history_points = 30
hidden_layer = 70
batch_size = 10
epoch = 60
dropout_probability = 1.0
mode = 'file'
try:
# Train the model and get the MSE result
model, scaled_mse = train_and_validate_stock_predictor(stock,history_points,predict_range,hidden_layer,batch_size,epoch,dropout_probability,mode)
# Add stock, range of prediction and MSE result to the list
mse_list.append([stock, predict_range, scaled_mse])
# Save the trained model to the runtime
model.save(stock+'_'+str(predict_range)+'.h5')
except Exception as e:
print("exception "+str(e)+"on "+stock)
pd.DataFrame(columns=['predict rage','stock','exception'],data=[predict_range,stock,str(e)]).to_csv('exception.csv')
continue
# Train model for 10 days predict range by using parameters that we found from our study earlier
predict_range = 10
history_points = 50
hidden_layer = 60
batch_size = 10
epoch = 80
dropout_probability = 0.3
mode = 'file'
try:
# Train the model and get the MSE result
model, scaled_mse = train_and_validate_stock_predictor(stock,history_points,predict_range,hidden_layer,batch_size,epoch,dropout_probability,mode)
# Add stock, range of prediction and MSE result to the list
mse_list.append([stock, predict_range, scaled_mse])
# Save the trained model to the runtime
model.save(stock+'_'+str(predict_range)+'.h5')
except Exception as e:
print("exception "+str(e)+"on "+stock)
pd.DataFrame(columns=['predict rage','stock','exception'],data=[predict_range,stock,str(e)]).to_csv('exception.csv')
continue
# Write the list of MSE from the loop to file for displaying later in query_price
pd.DataFrame(mse_list).to_csv('mse_list.csv')
print("Completed...")
def query_price(stock_list,date_range):
'''
Query the predicted price from the trained models by providing the stock names and the date range of prediction counted from the end date of the training data.
It supports date ranges of 1 to 10 days, served by the 1, 5 and 10 day models.
Input
- stock_list (String) : List of ticker in space delimited format e.g. tickerA tickerB tickerC.
- date_range (Number) : The number of days ahead to predict the price, from 1 to 10 days after the end date of the training data set.
'''
# Split the stock list to array
stock_list = stock_list.split(' ')
# Read MSE of the trained model for each stock and each range of prediction
df_mse = pd.read_csv('mse_list.csv')
if date_range <=10:
for stock in stock_list:
try:
# Do prediction by using the same history_points value when the model is trained
# The value that we use will depend on the date_range that the user select
if date_range == 1:
predict_range = 1
history_points = 90
mode = 'file'
if date_range > 1 and date_range <= 5:
predict_range = 5
history_points = 30
mode = 'file'
if date_range > 5 and date_range <= 10:
predict_range = 10
history_points = 50
mode = 'file'
# Load the model
model = load_model(stock+'_'+str(predict_range)+'.h5')
# Read the data and also add MACD and EMA (used only when mode = 'df'; skipped otherwise)
df_stock = pd.read_csv(stock+'.csv')
import csv
import datetime
import math
import sys
import time
import urllib.request
import pandas as pd
import pytz
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def download_csv(url, file_path):
urllib.request.urlretrieve(url, file_path)
CSV_PATH = "./prefectures.csv"
try:
download_csv("https://toyokeizai.net/sp/visual/tko/covid19/csv/prefectures.csv", CSV_PATH)
except Exception as e:
print(e)
csv_input = pd.read_csv(filepath_or_buffer=CSV_PATH, encoding="utf-8", sep=",")
# -------------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2018 <NAME>
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: LSTM.py
# Purpose:
#
# Author: <NAME>
#
# Created: 05/11/2018
# -------------------------------------------------------------------------------
from gecosistema_core import *
#pandas
import numpy as np
import pandas as pd
import os,sys
stderr = sys.stderr
from sklearn.preprocessing import MinMaxScaler
#keras
sys.stderr = open(os.devnull, 'w')
from keras.models import Sequential
from keras.layers import *
sys.stderr = stderr
class SimpleLSTM(Sequential):
def __init__(self, neurons=3 , dropout =0.05, dense=1, train_percent = 0.75 ):
Sequential.__init__(self)
self.neurons = neurons
self.dropout = dropout
self.dense=dense
self.scaler = MinMaxScaler(feature_range=(-1, 1))
self.df = None
self.X = None
self.y = None
self.train_percent = train_percent
self.predictions = None
def load(self,filename):
"""
load
"""
self.df = pd.read_csv(filename, sep=",", header=0, comment="#", engine='c')
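# Usage sketch (hypothetical CSV path; only the constructor and load() are shown in
# this excerpt, so training/prediction calls are omitted):
# lstm = SimpleLSTM(neurons=3, dropout=0.05, dense=1, train_percent=0.75)
# lstm.load("timeseries.csv")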
import numpy as np
import pandas as pd
import os
import pickle
import shutil
import urllib
import gzip
#
from .... import global_var
from . import geography, paths, transcode, url
def download_raw_weather_data(year = None,
month = None,
):
"""
Downloads the weather data provided by Météo-France.
:param year: The selected year
:param month: The selected month
:type year: int
:type month: int
:return: None
:rtype: None
"""
assert type(year) == int and year > 2000, year
assert type(month) == int and month in np.arange(1,13), month
os.makedirs(paths.folder_weather_meteofrance_raw,
exist_ok = True,
)
gzip_file_url = urllib.parse.urljoin(url.dikt['weather'],
paths.dikt_files['weather.file_year_month'].format(year = year, month = month),
) + '.csv.gz'
gz_file_path = os.path.join(paths.folder_weather_meteofrance_raw,
paths.dikt_files['weather.file_year_month'].format(year = year, month = month),
) + '.csv.gz'
urllib.request.urlretrieve(gzip_file_url,
gz_file_path,
)
csv_file_path = os.path.join(paths.folder_weather_meteofrance_raw,
paths.dikt_files['weather.file_year_month'].format(year = year, month = month),
) + '.csv'
with gzip.open(gz_file_path, 'rb') as f_in:
with open(csv_file_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def read_raw_weather_data(year = None,
month = None,
):
"""
Reads the weather data provided by Météo-France.
:param year: The selected year
:param month: The selected month
:type year: int
:type month: int
:return: The selected weather data
:rtype: pd.DataFrame
"""
assert type(year) == int and year > 2000, year
assert type(month) == int and month in np.arange(1,13), month
csv_file_path = os.path.join(paths.folder_weather_meteofrance_raw,
paths.dikt_files['weather.file_year_month'].format(year = year, month = month),
) + '.csv'
df = pd.read_csv(csv_file_path,
sep = ';',
na_values = ['mq'],
)
df = df.rename(transcode.columns,
axis = 1,
)
df[global_var.weather_dt_UTC] = pd.to_datetime(df[global_var.weather_dt_UTC],
format = '%Y%m%d%H%M%S',
).dt.tz_localize('UTC')
df.drop_duplicates(subset = [global_var.weather_dt_UTC, global_var.weather_site_id],
inplace = True,
keep = 'first',
)
df = df.set_index([global_var.weather_dt_UTC, global_var.weather_site_id])
df[global_var.weather_temperature_celsius] = df[global_var.weather_temperature_kelvin] - 273.15
df = df[[global_var.weather_temperature_celsius,
global_var.weather_nebulosity,
global_var.weather_wind_speed,
]]
df = df.astype(float)
df = df.unstack()
df = df.swaplevel(0,1, axis = 1)
df.columns.names = [global_var.weather_site_id, global_var.weather_physical_quantity]
return df
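# Usage sketch (assumes the package's paths/url configuration is in place):
# download_raw_weather_data(year=2018, month=1)
# df_weather = read_raw_weather_data(year=2018, month=1)
# df_weather.head()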
def correct_filter_weather(df_weather):
"""
Finds and tries to correct anomalies in
the weather data provided by Météo-France.
:param df_weather: The weather data frame
:type df_weather: pd.DataFrame
:return: The corrected weather data frame
:rtype: pd.DataFrame
"""
# Some series start with a few Nan so correct them or drop them
length_missing_data_beginning = (1 - pd.isnull(df_weather))
import pandas as pd
import numpy as np
from random import gauss, uniform
def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False):
'''
Calculate makespan
'''
under = False
reactive_resource_usage = [0] * num_resources
resource_usage = [0] * num_resources
expected = [0] * num_resources
tmp_idx = [0] * num_resources
for placement in curr_plan:
workflow = placement[0]
resource = placement[1]
resource_id = resource['id']
expected_finish = placement[3]
if dynamic_res:
perf = gauss(resource['performance'], resource['performance'] * 0.0644)
else:
perf = resource['performance']
if positive:
inaccur = uniform(0, workflow_inaccur)
else:
inaccur = uniform(-workflow_inaccur, workflow_inaccur)
exec_time = (workflow['num_oper'] * (1 + inaccur)) / perf
reactive_resource_usage[resource_id - 1] += exec_time
resource_usage[resource_id - 1] = max(resource_usage[resource_id - 1] + exec_time, expected_finish)
expected[resource_id - 1] = expected_finish
tmp_idx[resource_id - 1] += 1
return max(resource_usage), max(reactive_resource_usage), max(expected)
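# Usage sketch (synthetic plan; the placement structure is inferred from the loop
# above: [workflow_dict, resource_dict, <unused>, expected_finish]):
# plan = [[{'num_oper': 1000}, {'id': 1, 'performance': 10.0}, None, 100.0],
#         [{'num_oper': 2000}, {'id': 2, 'performance': 20.0}, None, 100.0]]
# makespan, reactive, expected = get_makespan(plan, num_resources=2,
#                                             workflow_inaccur=0.05, positive=True)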
# ------------------------------------------------------------------------------
# 5%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p5perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.05, dynamic_res=True, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p5perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.05, dynamic_res=False, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/StHeteroResources_StHeteroCampaignsHEFT_inaccur_p5perc.csv', index=False)
# ------------------------------------------------------------------------------
# 10%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p10perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.1, dynamic_res=True, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p10perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.1, dynamic_res=False, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/StHeteroResources_StHeteroCampaignsHEFT_inaccur_p10perc.csv', index=False)
# ------------------------------------------------------------------------------
# 20%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p20perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.2, dynamic_res=True, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p20perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.2, dynamic_res=False, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/StHeteroResources_StHeteroCampaignsHEFT_inaccur_p20perc.csv', index=False)
# ------------------------------------------------------------------------------
# 30%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p30perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.3, dynamic_res=True, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p30perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.3, dynamic_res=False, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/StHeteroResources_StHeteroCampaignsHEFT_inaccur_p30perc.csv', index=False)
# ------------------------------------------------------------------------------
# 40%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p40perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.4, dynamic_res=True, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p40perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0.4, dynamic_res=False, positive=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/StHeteroResources_StHeteroCampaignsHEFT_inaccur_p40perc.csv', index=False)
# ------------------------------------------------------------------------------
# 50%
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT_inaccur_p50perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
import pandas as pd
import babel
from classification import (
Hierarchy,
parent_code_table_to_parent_id_table,
Classification,
)
if __name__ == "__main__":
df = pd.read_csv(
"./in/wikipedia-iso-country-codes.csv",
encoding="utf-8",
dtype={"Numeric code": str},
na_values=None,
keep_default_na=False,
)
df.columns = ["name_short_en", "code_alpha2", "code_alpha3", "code_numeric", "link"]
df = df[["code_alpha2", "code_alpha3", "code_numeric"]]
df = df.sort_values("code_alpha3").reset_index(drop=True)
# Merge in customary names in different languages from babel (Unicode CLDR)
en_names = pd.DataFrame.from_dict(
dict(babel.Locale.parse("en_US").territories), orient="index"
)
en_names.columns = ["name_en"]
df = df.merge(en_names, left_on="code_alpha2", right_index=True, how="left")
es_names = pd.DataFrame.from_dict(
dict(babel.Locale.parse("es_419").territories), orient="index"
)
es_names.columns = ["name_es"]
df = df.merge(es_names, left_on="code_alpha2", right_index=True, how="left")
# Merge in region codes
alpha3_to_region = pd.read_csv(
"./in/countries_to_regions.csv", dtype={"parent_code": str}
)
df = df.merge(alpha3_to_region, on="code_alpha3", how="left")
# Add custom codes
custom_codes = pd.read_csv("./in/custom-codes.csv", dtype={"parent_code": str})
df = pd.concat([df, custom_codes]).reset_index(drop=True)
df["level"] = "country"
# Add region code level
region_codes = pd.read_table("./in/regions.tsv", dtype={"code": str})
from app.calculation_service.model.scenario_inputs import ScenarioInputs
from app.calculation_service.model.study_design import StudyDesign
from app.calculation_service.api import _generate_models, _calculate_power
import pandas as pd
pd.set_option('precision', 7)
def json_power(json_path):
with open(json_path, 'r') as f:
data = f.read()
inputs = ScenarioInputs().load_from_json(data)
scenario = StudyDesign().load_from_json(data)
models = _generate_models(scenario, inputs)
results = []
for m in models:
result = _calculate_power(m)
outdata = {'Power': result['power'],
'Test': result['test'],
'Sigma Scale': result['model']['variance_scale_factor'],
'Beta Scale': result['model']['means_scale_factor'],
'Total N': result['model']['total_n'],
'Alpha': result['model']['alpha']}
results.append(outdata)
return pd.DataFrame(results)
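# Usage sketch (hypothetical path, following the file_path + V3_JSON convention
# used by tex_table below):
# df_v3 = json_power('./v3_scenario.json')
# df_v3[['Test', 'Total N', 'Power']].head()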
def tex_table(file_path, V3_JSON, V2_results):
_df_vtest = json_power(file_path + V3_JSON)
_df_v2results = pd.read_csv(file_path + V2_results)
# python 3
import matplotlib
# matplotlib.use('pgf')
# pgf_with_pdflatex = {
# "pgf.texsystem": "pdflatex",
# "pgf.preamble": [
# r"\usepackage[utf8x]{inputenc}",
# r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
# ]
# }
# matplotlib.rcParams.update(pgf_with_pdflatex)
import pandas
import re
from matplotlib2tikz import save as tikz_save
import numpy
from matplotlib import pyplot
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
x = int(x)
minutes = x // 60
seconds = x % 60
return '{:02d}:{:02d}'.format(minutes, seconds)
def get_speed_stats(speed_data_path):
df = pandas.read_csv(speed_data_path, sep=',', thousands=',')
try:
node_nr = re.search('ProvidenciaExampleScenario.(.+?).veinsmobility.speed', df.columns[1]).group(1)
except AttributeError:
node_nr = '??' # apply your error handling
df.columns = ['time', 'speed']
mean = df['speed'].mean()
std = df['speed'].std()
return (node_nr, mean, std)
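# Usage sketch (hypothetical export of a single node's speed vector from the
# Veins/OMNeT++ scenario named in the regex above):
# node_nr, mean_speed, std_speed = get_speed_stats('speeds_node12.csv')
# print(node_nr, mean_speed, std_speed)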
def build_dataframe_case(case):
# mobility data
mobility_columns = ['module', 'max_speed', 'min_speed', 'start_time', 'stop_time',
'total_co2', 'total_dist', 'total_time']
case_df_mobility = pandas.read_csv(case + '_stats_veinsmobility.csv')
case_df_mobility.columns = mobility_columns
mobility_search_re = 'ProvidenciaExampleScenario.(.+?).veinsmobility'
case_df_mobility['module'] = case_df_mobility['module'].map(lambda x: re.search(mobility_search_re, x).group(1))
case_df_mobility.set_index(['module'], inplace=True)
# appl data (sent warnings, arrived at dest)
appl_columns = ['module', 'arrived', 'rcvd_warnings', 'sent_warnings']
case_df_appl = pandas.read_csv(case + '_stats_appl.csv')
case_df_appl.columns = appl_columns
appl_search_re = 'ProvidenciaExampleScenario.(.+?).appl'
case_df_appl['module'] = case_df_appl['module'].map(lambda x: re.search(appl_search_re, x).group(1))
case_df_appl['arrived'] = case_df_appl['arrived'].map({1: True, 0: False})
case_df_appl.set_index(['module'], inplace=True)
case_df_speed = pandas.DataFrame()
case_df_speed['mean_speed'] = case_df_mobility['total_dist'] / case_df_mobility['total_time']
# join all tables
case_df = pandas.merge(case_df_mobility, case_df_appl, left_index=True, right_index=True, how='outer')
case_df = pandas.merge(case_df, case_df_speed, left_index=True, right_index=True, how='outer')
return case_df
def buid_csv():
for case in ['per0.0', 'per1.0', 'base_case', 'per0.5', 'per0.75', 'per0.25']:
df = build_dataframe_case(case)
df.to_csv(case + '_total_stats.csv')
def analysis_arrived_vhc():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
base_arrived_cnt = base['arrived'].sum()
per00_arrived_cnt = per00['arrived'].sum()
per10_arrived_cnt = per10['arrived'].sum()
per05_arrived_cnt = per05['arrived'].sum()
per075_arrived_cnt = per075['arrived'].sum()
per025_arrived_cnt = per025['arrived'].sum()
objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.75', 'PER 1.0')
#objects = ('Caso Base', 'PER 0.0', 'PER 1.0')
x_ax = numpy.arange(len(objects))
#bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt,
# per05_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
pyplot.bar(x_ax, bars)
#pyplot.yscale('log')
pyplot.yticks(bars)
pyplot.xticks(x_ax, objects)
for a, b in zip(x_ax, bars):
pyplot.text(a, b, str(b))
#pyplot.ylabel('N° de vehículos que alcanzaron su destino')
pyplot.title('N° de vehículos que alcanzaron su destino (escala log)')
pyplot.show()
def analysis_speed():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
y = [base.loc[base['arrived'] == False]['mean_speed'].mean(),
per00.loc[per00['arrived'] == False]['mean_speed'].mean(),
per025.loc[per025['arrived'] == False]['mean_speed'].mean(),
per05.loc[per05['arrived'] == False]['mean_speed'].mean(),
per075.loc[per075['arrived'] == False]['mean_speed'].mean(),
per10.loc[per10['arrived'] == False]['mean_speed'].mean()]
objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.5', 'PER 0.75', 'PER 1.0')
x = numpy.arange(len(objects))
pyplot.bar(x, y)
pyplot.yscale('log')
#pyplot.yticks(y)
pyplot.xticks(x, objects)
pyplot.ylabel('Velocidad m/s')
pyplot.title('Velocidades promedio de vehículos que NO alcanzaron su destino.')
for a, b in zip(x, y):
pyplot.text(a, b, str(b))
pyplot.show()
def analysis_distance():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
import pandas as pd
from flask import Flask,jsonify,request
import joblib
scaler = joblib.load('scaler.pkl')
model = joblib.load('model.pkl')
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route('/predict', methods=['POST'])
def index():
body = request.json
Pclass = body["Pclass"]
Sex = body["Sex"]
Age = body["Age"]
SibSp = body["SibSp"]
Parch = body["Parch"]
Fare = body["Fare"]
Embarked = body["Embarked"]
try:
if Embarked == "Q":
array=pd.DataFrame([[Pclass,Sex,Age,SibSp,Parch,Fare,0,1,0]])
elif Embarked == "C":
array=pd.DataFrame([[Pclass,Sex,Age,SibSp,Parch,Fare,1,0,0]])
elif Embarked == "S":
array=pd.DataFrame([[Pclass,Sex,Age,SibSp,Parch,Fare,0,0,1]])
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])