prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import random
import logging
import pandas as pd
# set the logger
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
""":cvar
(self.dataset) image_df attributes:
- image_id : int
- name : str
- folder : str
- path : str (separated by / )
- width : int
- height : int
- format : class [(default) RGB, GBR, SHA ]
"""
class ImgData:
""" data extract from image dataset. """
def __init__(self, root: str, dataset):
self.dataset = dataset
self.root = root
@classmethod
def extract(cls, dataset_path: str):
"""
:param dataset_path: directory of the dataset.
:return: ImgData instance
Extract folder names and all the files in the dataset.
"""
folders = ImgData.ext_folders(dataset_path)
files = ImgData.ext_files(dataset_path)
dataset = {"folders": folders, "files": files}
data_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal( | notnull(idx.values) | pandas.core.dtypes.missing.notnull |
"""Core managing the datasets.
Usage example:
# Create a dataset of 100 benign PE and 100 malware samples, with minimum
# malice of 0.9
DatasetCore.create_dataset_from_parameters(AnalyzedFileTypes.PE, 0.9,
9 * [True], 200, 0.5,
"pe_malice.csv")
# Create a dataset of 200 generic and trojan PE samples, with minimum malice
# of 0.9
DatasetCore.create_dataset_from_parameters(
AnalyzedFileTypes.PE, 0.9,
[True, True, False, False, False, False, False, False, False], 200, 0,
"pe_generic_vs_trojan.csv")
# Delete the created datasets
DatasetCore.remove_dataset("pe_malice.csv")
DatasetCore.remove_dataset("pe_generic_vs_trojan.csv")
"""
import json
import os
import typing
import modules.dataset.errors as errors
import pandas
import yaml
from modules.configuration.folder_structure import Files, Folders
from modules.configuration.parameters import Packages
from modules.dataset.types import AnalyzedFileTypes
from modules.utils.configuration_manager import ConfigurationManager
from modules.utils.types import ConfigurationSpaces
CONFIGURATION_KEYS = Packages.Dataset.ConfigurationKeys
class DatasetCore:
"""Class for working with datasets."""
@staticmethod
def _get_metadata(dataset_full_path: str) -> dict:
with open(dataset_full_path, "r") as dataset_file:
lines = dataset_file.readlines()
if len(lines) == 0:
return None
metadata_line = lines[0]
if (not metadata_line.startswith(
Packages.Dataset.METADATA_LINE_START)):
return None
metadata_line = metadata_line[len(Packages.Dataset.METADATA_LINE_START
):]
metadata = json.loads(metadata_line)
return metadata
@staticmethod
def _dump_metadata(dataset_full_path: str, metadata: dict) -> None:
stringified_metadata = json.dumps(metadata)
with open(dataset_full_path, "r+") as output_file:
content = output_file.read()
new_content = Packages.Dataset.METADATA_LINE_START
new_content += stringified_metadata + "\n" + content
output_file.seek(0, 0)
output_file.write(new_content)
@staticmethod
def create_dataset_from_parameters(file_type: AnalyzedFileTypes,
min_malice: float,
desired_families: typing.List[bool],
entries_count: int,
benign_ratio: float,
output_filename: str,
description: str = "") -> bool:
"""Creates a custom dataset based on the given parameters.
Args:
file_type (AnalyzedFileTypes): Type of files to include
min_malice (float): Minimum malice score of malware samples included
in the dataset
desired_families (typing.List[bool]): Array of booleans, in which
each entry indicates whether the family at that index is
included in the dataset
entries_count (int): Mandatory number of entries in the dataset
benign_ratio (float): Ratio of benign samples to the total number
of entries in the dataset
output_filename (str): The base name of the output file
description (str): Description of the dataset. Defaults to "".
Raises:
InsufficientEntriesForDatasetError: The dataset could not be built
due to insufficient entries.
Returns:
bool: Boolean indicating if the dataset was successfully created
"""
malware_labels_df = pandas.read_csv(Files.MALWARE_LABELS)
benign_labels_df = pandas.read_csv(Files.BENIGN_LABELS)
# Select only the desired file type
malware_labels_df = malware_labels_df[malware_labels_df["type"] ==
file_type.value.ID]
benign_labels_df = benign_labels_df[benign_labels_df["type"] ==
file_type.value.ID]
# Get the entries' count for each type of sample
malware_count = int((1 - benign_ratio) * entries_count)
benign_count = entries_count - malware_count
# Select the entries with malice above the minimum one
malware_labels_df = malware_labels_df[
malware_labels_df["malice"] >= min_malice]
# Check if a dataset can be built
if (len(malware_labels_df) < malware_count
or len(benign_labels_df) < benign_count):
raise errors.InsufficientEntriesForDatasetError()
# Select entries with the maximum membership to the given categories
desired_families_int = [1 if elem else 0 for elem in desired_families]
malware_labels_df["membership"] = malware_labels_df.iloc[:, 3:].dot(
desired_families_int)
malware_labels_df = malware_labels_df.sort_values("membership", ascending=False)
del malware_labels_df["membership"]
malware_labels_df = malware_labels_df.head(malware_count)
# Select the benign entries in a random manner
benign_labels_df = benign_labels_df.sample(n=benign_count)
# Merge the data frames
all_labels_df = pandas.concat([malware_labels_df, benign_labels_df])
all_labels_df = all_labels_df.sample(frac=1).reset_index(drop=True)
# Dump the dataframe to file
output_full_filename = os.path.join(Folders.CUSTOM_DATASETS,
output_filename)
all_labels_df.to_csv(output_full_filename, index=False)
# Create the metadata and dump them
desired_families_names = [
name for include, name in zip(
desired_families, malware_labels_df.columns[3:]) if include
]
metadata = {
"description": description,
"extension": file_type.value.STANDARD_EXTENSION,
"min_malice": min_malice,
"desired_families": desired_families_names,
"entries_count": entries_count,
"benign_ratio": benign_ratio
}
DatasetCore._dump_metadata(output_full_filename, metadata)
return True
@staticmethod
def create_dataset_from_config(config_filename: str) -> bool:
"""Creates a custom dataset based on the configuration from a file.
Args:
config_filename (str): YAML configuration file
Raises:
DatasetConfigurationFileNotFoundError: The configuration file of the
dataset could not be found or opened.
InvalidFileExtensionError: The mentioned file extension from the
dataset configuration file is invalid.
DatasetConfigurationMandatoryKeysNotPresentError: The dataset
configuration file does not contain all mandatory keys.
Returns:
bool: Boolean indicating if the dataset was successfully created
"""
# Get the malware families
configuration = ConfigurationManager()
dataset_config = configuration.get_space(ConfigurationSpaces.DATASET)
malware_families = dataset_config["malware_families"].keys()
malware_families = [family.lower() for family in malware_families]
try:
with open(config_filename, "r") as config_file:
configuration = yaml.load(config_file, Loader=yaml.SafeLoader)
except Exception:
raise errors.DatasetConfigurationFileNotFoundError()
# Check if the mandatory keys are present
valid_keys = [
elem.value for elem in CONFIGURATION_KEYS
if not elem.name.endswith("_")
]
for key in valid_keys:
if key not in configuration.keys():
raise errors.DatasetConfigurationMandatoryKeysNotPresentError()
# Map the families names to elements in an array of booleans
processed_desired_categories = 9 * [False]
for family in configuration[CONFIGURATION_KEYS.DESIRED_FAMILIES.value]:
try:
index = malware_families.index(family)
processed_desired_categories[index] = True
except Exception: # nosec
pass
configuration[CONFIGURATION_KEYS.DESIRED_FAMILIES.
value] = processed_desired_categories
# Map the desired extension to a file type
file_type = AnalyzedFileTypes.map_extension_to_type(
configuration.pop(CONFIGURATION_KEYS.FILE_EXTENSION.value))
if not file_type:
raise errors.InvalidFileExtensionError()
configuration["file_type"] = file_type
return DatasetCore.create_dataset_from_parameters(**configuration)
@staticmethod
def list_datasets() -> typing.List[typing.List]:
"""Lists all created datasets by collecting their metadata.
Returns:
typing.List[typing.List]: Datasets metadata
"""
all_metadata = pandas.DataFrame()
filenames = os.listdir(Folders.CUSTOM_DATASETS)
for filename in filenames:
# Skip the dotfiles (mainly the .gitignore)
if filename.startswith("."):
continue
dataset_full_filename = os.path.join(Folders.CUSTOM_DATASETS,
filename)
metadata = DatasetCore._get_metadata(dataset_full_filename)
if metadata is None:
continue
metadata["filename"] = filename
all_metadata = all_metadata.append(metadata, ignore_index=True)
datasets_details = all_metadata.values.tolist()
datasets_details.insert(0, all_metadata.columns.to_list())
return datasets_details
@staticmethod
def read_dataset(dataset_path: str) -> pandas.DataFrame:
"""Reads a stock dataset or a custom one.
Args:
dataset_path (str): Path of the dataset
Returns:
pandas.DataFrame: Read dataset
"""
return | pandas.read_csv(dataset_path, index_col=False, skiprows=1) | pandas.read_csv |
from psaw import PushshiftAPI
import datetime as dt
import pandas as pd
submission_filter = [
'author',
'author_fullname',
'full_link',
'is_self',
'num_comments',
'score',
'selftext',
'title',
'id',
]
comment_filter = [
'author',
'author_fullname',
'body',
'is_submitter',
'id',
'link_id', # post id
'parent_id', # parent id = link id when top level comment
'score',
'total_awards_received',
]
def clean_posts(df):
df = df.loc[(df["title"].str.startswith("AITA")) | (df["title"].str.startswith("WIBTA"))]
df = df.loc[~(df["selftext"] == "[removed]")]
df = df.loc[~(pd.isna(df["selftext"]))]
df = df.loc[df["selftext"] != ""]
df = df.loc[df["num_comments"] > 0]
return df
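# Illustrative usage of clean_posts (hypothetical data; the column names follow
# the filters above):
# posts = pd.DataFrame({"title": ["AITA for leaving early?", "Story time"],
#                       "selftext": ["long story ...", "[removed]"],
#                       "num_comments": [5, 3], "id": ["a1", "a2"]})
# clean_posts(posts)  # keeps only the first row: AITA/WIBTA title, text, comments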
def clean_comments(df, post_ids):
df = df.loc[df["parent_id"] == df["link_id"]]
df["link_id"] = df["link_id"].apply(lambda x: x[3:])
df = df.loc[df["link_id"].isin(post_ids)]
def find_labels(text: str):
return [q for q in ["NTA", "YTA", "ESH", "NAH", "INFO"] if q in text]
df["labels"] = df["body"].apply(lambda x: find_labels(x))
df["num_labels"] = df["labels"].apply(lambda x: len(x))
df = df.loc[df["num_labels"] == 1]
df["labels"] = df["labels"].apply(lambda x: x[0])
return df
def merge_comments_and_posts(df_posts: pd.DataFrame, df_comments: pd.DataFrame):
itol = ["NTA", "YTA", "ESH", "NAH", "INFO"]
ltoi = {l:i for i,l in enumerate(itol)}
print("cleaning posts")
l = len(df_posts)
df_posts = clean_posts(df_posts)
post_ids = df_posts.id.to_list()
print(f"{l - len(df_posts)} posts removed, cleaning comments")
l = len(df_comments)
df_comments = clean_comments(df_comments, post_ids)
print(f"{l - len(df_comments)} comments removed, merging posts and comments")
comment_labels = df_comments.labels.to_list()
comment_post_ids = df_comments.link_id.to_list()
comment_score = df_comments.score.to_list()
post_labels_dict = {post_id: [0,0,0,0,0] for post_id in post_ids}
for post_id, label, score in zip(comment_post_ids, comment_labels, comment_score):
post_labels_dict[post_id][ltoi[label]] += score
print("updating df_posts with labels")
df_posts["label_counts"] = [post_labels_dict[post_id] for post_id in post_ids]
df_posts["label_sum"] = df_posts["label_counts"].apply(lambda x: sum(x))
l = len(df_posts)
df_posts = df_posts[df_posts["label_sum"] > 0]
df_posts["label_probs"] = [[c/s for c in counts] for counts, s in zip(
df_posts["label_counts"], df_posts["label_sum"])]
print(f"{l - len(df_posts)} posts removed")
df_posts.to_pickle("aita_2019_posts_labeled.pkl")
df_comments.to_pickle("aita_2019_comments_cleaned.pkl")
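# Worked example of the label aggregation above (illustrative numbers): a post
# whose top-level comments contribute score-weighted counts [10, 2, 0, 0, 1]
# over [NTA, YTA, ESH, NAH, INFO] gets label_sum 13 and
# label_probs [10/13, 2/13, 0, 0, 1/13].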
if __name__ == '__main__':
api = PushshiftAPI()
start_dt = int(dt.datetime(2019, 1, 1).timestamp())
posts_gen = api.search_submissions(
after=start_dt,
subreddit="amitheasshole",
filter=submission_filter
)
df_posts = | pd.DataFrame() | pandas.DataFrame |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import (
datetime,
timedelta,
)
import pickle
import pprint
import pytz
import uuid
import pandas as pd
from nose_parameterized import parameterized
from zipline.finance.trading import with_environment
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
)
class FakeTable(object):
def __init__(self, name, count, dt, fuzzy_str):
self.name = name
self.count = count
self.dt = dt
self.fuzzy_str = fuzzy_str
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST%s%s' % (self.fuzzy_str, i),
'company_name': self.name + str(i),
'start_date_nano': pd.Timestamp(dt, tz='UTC').value,
'end_date_nano': pd.Timestamp(dt, tz='UTC').value,
'exchange': self.name,
}
for i in range(1, self.count + 1)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
class FakeTableIdenticalSymbols(object):
def __init__(self, name, as_of_dates):
self.name = name
self.as_of_dates = as_of_dates
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': self.name,
'company_name': self.name,
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': self.name,
}
for i, date in enumerate(self.as_of_dates)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
class FakeTableFromRecords(object):
def __init__(self, records):
self.records = records
self.df = pd.DataFrame.from_records(self.records)
def read(self, *args, **kwargs):
return self.df.to_records()
@with_environment()
def build_lookup_generic_cases(env=None):
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
table = FakeTableFromRecords(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
env.update_asset_finder(asset_metadata=table.df)
dupe_0, dupe_1, unique = assets = [
env.asset_finder.retrieve_asset(i)
for i in range(3)
]
# This expansion code is run at module import time, which means we have to
# clear the AssetFinder here or else it will interfere with the cache
# for other tests.
env.update_asset_finder(clear_metadata=True)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(table, assets[0], None, assets[0]),
(table, assets[1], None, assets[1]),
(table, assets[2], None, assets[2]),
# int
(table, 0, None, assets[0]),
(table, 1, None, assets[1]),
(table, 2, None, assets[2]),
# Duplicated symbol with resolution date
(table, 'duplicated', dupe_0_start, dupe_0),
(table, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(table, 'unique', unique_start, unique),
(table, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(table, assets, None, assets),
(table, iter(assets), None, assets),
# Iterables of ints
(table, (0, 1), None, assets[:-1]),
(table, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(table, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(table, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(table,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'asset_end_date',
'asset_name',
'asset_start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(2468,
symbol='OMK15',
notice_date='2014-01-20',
expiration_date='2014-02-20',
contract_multiplier=500)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMK15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMK15" in reprd)
self.assertTrue("notice_date='2014-01-20'" in reprd)
self.assertTrue("expiration_date='2014-02-20'" in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
class AssetFinderTestCase(TestCase):
@with_environment()
def test_lookup_symbol_fuzzy(self, env=None):
fuzzy_str = '@'
as_of_date = datetime(2013, 1, 1, tzinfo=pytz.utc)
table = FakeTable(uuid.uuid4().hex, 2, as_of_date,
fuzzy_str)
env.update_asset_finder(asset_metadata=table.df)
sf = env.asset_finder
try:
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(sf.lookup_symbol('test', as_of_date))
self.assertIsNotNone(sf.lookup_symbol(
'test%s%s' % (fuzzy_str, 1), as_of_date))
self.assertIsNone(sf.lookup_symbol('test%s' % 1, as_of_date))
self.assertIsNone(sf.lookup_symbol(table.name, as_of_date,
fuzzy=fuzzy_str))
self.assertIsNotNone(sf.lookup_symbol(
'test%s%s' % (fuzzy_str, 1), as_of_date, fuzzy=fuzzy_str))
self.assertIsNotNone(sf.lookup_symbol(
'test%s' % 1, as_of_date, fuzzy=fuzzy_str))
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_symbol_resolve_multiple(self, env=None):
as_of_dates = [
pd.Timestamp('2013-01-01', tz='UTC') + timedelta(days=i)
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
for i in range(0, 10, 2)
]
table = FakeTableIdenticalSymbols(
name='existing',
as_of_dates=as_of_dates,
)
env.update_asset_finder(asset_metadata=table.df)
sf = env.asset_finder
try:
for _ in range(2): # we do it twice to test for caching bugs
with self.assertRaises(SymbolNotFound):
sf.lookup_symbol_resolve_multiple('non_existing',
as_of_dates[0])
with self.assertRaises(MultipleSymbolsFound):
sf.lookup_symbol_resolve_multiple('existing',
None)
for i, date in enumerate(as_of_dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = sf.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_symbol_nasdaq_underscore_collisions(self, env=None):
"""
Ensure that each NASDAQ symbol without underscores maps back to the
original symbol when using fuzzy matching.
"""
sf = env.asset_finder
fuzzy_str = '_'
collisions = []
try:
for sid in sf.sids:
sec = sf.retrieve_asset(sid)
if sec.exchange.startswith('NASDAQ'):
found = sf.lookup_symbol(sec.symbol.replace(fuzzy_str, ''),
sec.end_date, fuzzy=fuzzy_str)
if found != sec:
collisions.append((found, sec))
# KNOWN BUG: Filter out assets that have intersections in their
# start and end dates. We can't correctly resolve these.
unexpected_errors = []
for first, second in collisions:
overlapping_dates = (
first.end_date >= second.start_date or
second.end_date >= first.end_date
)
if not overlapping_dates:
unexpected_errors.append((first, second))
self.assertFalse(
unexpected_errors,
pprint.pformat(unexpected_errors),
)
finally:
env.update_asset_finder(clear_metadata=True)
@parameterized.expand(
build_lookup_generic_cases()
)
@with_environment()
def test_lookup_generic(self, table, symbols, reference_date, expected,
env=None):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
try:
env.update_asset_finder(asset_metadata=table.df)
finder = env.asset_finder
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_generic_handle_missing(self, env=None):
try:
table = FakeTableFromRecords(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': | pd.Timestamp('2013-1-1', tz='UTC') | pandas.Timestamp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import glob
import os.path as path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
analyze_player.py
This program implements functions to analyze (and assist in analyzing) player
stats.
"""
def join_years(player_dir):
"""Join the stat years for a player into one pandas dataframe.
:player_dir: directory containing one stats CSV per year for a player
:returns: a single dataframe with all years joined
"""
# Sort the files by year.
year_csvs = sorted(glob(path.join(player_dir, "*")))
dfs = []
master_df = pd.DataFrame()
for csv in year_csvs:
df = | pd.read_csv(csv, parse_dates=True, index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
import math
from sklearn.cluster import KMeans
import json
LOG_DIR = 'log_2021-12-06_04:51:47/'
NUM_PER_CLUSTER_LIST = [1, 2, 4, 8, 16]
SCORE_WEIGHT = 3
# artists
with open(LOG_DIR + 'artist.csv') as a:
artists = pd.read_csv(a, header=None)
# following
with open(LOG_DIR + 'following.csv') as f:
following = pd.read_csv(f, header=None)
# spotify
with open(LOG_DIR + 'spotify_artist.csv') as s:
spotify = pd.read_csv(s, header=None)
means_cols = []
# audio features up to duration
for i in range(5, 27, 2):
# omit key
if i == 9:
pass
else:
means_cols.append(i)
# twitter
with open(LOG_DIR + 'twitter_user.csv') as t:
twitter = pd.read_csv(t, header=None)
# check for nan and finite columns
for col in means_cols:
col_nan = np.isnan(spotify.iloc[:, col]).values.any()
if col_nan:
print('Column {} has nan'.format(col))
col_inf = np.isinf(spotify.iloc[:, col]).values.any()
if col_inf:
print('Column {} has inf'.format(col))
col_large = (spotify.iloc[:, col] >= np.finfo('float64').max).any()
if col_large:
print('Column {} has large value'.format(col))
#print('nan in spotify {}'.format(np.isnan(pd.DataFrame(np.nan_to_num(spotify.iloc[:, means_cols]))).values.any()))
#print('done with checks')
# spotify info dictionary
s_info = {}
sids = set()
tids = set()
# spotify id key, add twitter id
for i, row in artists.iterrows():
s_info[row[2]] = {'tid': int(row[1])}
sids.add(row[2])
tids.add(int(row[1]))
print('{} unique spotify ids'.format(len(sids)))
print('{} unique twitter ids'.format(len(tids)))
# spotify name, genres, means
for i, row in spotify.iterrows():
sid = row[0]
if sid in s_info:
s_info[sid]['spotify name'] = row[1]
genres = []
for g in row.iloc[2].replace('[', '').replace(']', '').replace("'", "").split(','):
genres.append(g.strip())
s_info[sid]['genres'] = genres
means = []
for col in means_cols:
means.append(row.iloc[col])
s_info[sid]['means'] = means
# twitter info dictionary
t_info = {}
# twitter id keys, add username, name, followers and following counts
for i, row in twitter.iterrows():
t_info[int(row[0])] = {'username': row[1], 'name': row[2],
'followers count': row[5], 'following count': row[6],
'followers': [], 'following': []}
# followers and following ids
for i, row in following.iterrows():
t_info[int(row[0])]['following'].append(int(row[1]))
t_info[int(row[1])]['followers'].append(int(row[0]))
# artists and followers count table
df_artists_fcounts = | pd.DataFrame(columns=['sid', 'tid', 'followers count']) | pandas.DataFrame |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
Returns the column name corresponding to the given year. Checks that the
year you picked falls within the range covered by the file.
:param years: string, hyphen-separated year range (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, column name of the form "year_N"
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
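# Illustrative example (not part of the original module): with the coverage
# window "2014-2018", 2016 is the third covered year, so
# usgs_myb_year("2014-2018", "2016") returns "year_3".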
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return: string, parsed source name in lowercase with spaces
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
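# Illustrative example (the source name below is hypothetical):
# usgs_myb_name("USGS_MYB_SandGravelConstruction") splits off the camel-case
# part and returns "sand gravel construction".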
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return: dict, Flow-By-Activity fields common to all USGS_MYB sources
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string: string to strip digits from
:return: string with all digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
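# Illustrative example: usgs_myb_remove_digits("Imports for consumption:3")
# drops the trailing footnote digit and returns "Imports for consumption:".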
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = | pd.concat(frames) | pandas.concat |
import os
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib as mpl
import numpy as np
import random
os.chdir('C:/Users/uf11/Desktop/OutFin') # NOTE: change the path to where OutFin dataset resides
os.chdir('Measurements')
def Validity2_WiFi():
# ---------------------------------- Setting up the layout of the plot ---------------------------------- #
mpl.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('legend', fontsize=5)
plt.rc('axes', titlesize=7)
plt.rc('figure', titlesize=9)
fig, axs = plt.subplots(2, 4, figsize=(10,5))
fig.suptitle('WiFi')
axs[0,0].set_ylabel('Phone1', rotation=0, fontsize=7, labelpad=20)
axs[1,0].set_ylabel('Phone2', rotation=0, fontsize=7, labelpad=20)
# ------------------------------------------------------------------------------------------------------- #
df_all = pd.DataFrame() # initializing a dataframe to aggregate all WiFi .csv files for a given phone
scaler_list = [] # initializing a list to hold the Min Max estimators (i.e., one for each phone)
for i in range(2): # iterate over phones
for j in range(122): # iterate over RPs
df_temp = pd.read_csv('Phone' + str(i + 1) + '_WiFi_' + str(j + 1) + '.csv') # read the WiFi .csv file
df_all = df_all.append(df_temp, ignore_index=True) # append it to the df_all dataframe
df_all = df_all[['SSID', 'BSSID', 'Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band',
'Capabilities', 'RSS_0', 'RSS_1', 'RSS_2', 'RSS_3', 'RSS_4', 'RSS_5', 'RSS_6', 'RSS_7', 'RSS_8',
'RSS_9', 'RSS_10']] # rearrange the columns for processing
df_temp = df_all.iloc[:, 8:] # select all RSS readings of the APs
df_all['RSS'] = df_temp.mean(axis = 1) # calculate the mean RSS value of the APs
df_all = df_all[['BSSID', 'Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']] # leave
# only the columns of interest
scaler = MinMaxScaler(feature_range=(0, 1)) # use Min Max estimator to scale features between 0 and 1
scaler.fit(df_all[['Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']]) # fit the
# estimator to the columns of interest
scaler_list.append(scaler) # append the estimator to the list to be used for the respective phone
randomRP = random.sample(range(1, 122), 4) # randomly select 4 RPs
for i in range(len(randomRP)): # iterate over the selected RPs
df1 = pd.read_csv('Phone1_WiFi_' + str(randomRP[i]) + '.csv') # read the WiFi .csv file corresponding to the RP for
# Phone 1
df_temp = df1.iloc[:, 8:] # select all RSS readings of the APs
df1['RSS'] = df_temp.mean(axis = 1) # calculate the mean RSS value of the APs
df1 = df1[['BSSID', 'Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']] # leave only
# the columns of interest
df1[['Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']] = \
scaler_list[0].transform(df1[['Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']]) #
# scale the values according to the estimator of Phone 1
df2 = pd.read_csv('Phone2_WiFi_' + str(randomRP[i]) + '.csv') # read the WiFi .csv file corresponding to the RP for
# Phone 2
df_temp = df2.iloc[:, 8:] # select all RSS readings of the APs
df2['RSS'] = df_temp.mean(axis = 1) # calculate the mean RSS value of the APs
df2 = df2[['BSSID', 'Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']] # leave only
# the columns of interest
df2[['Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']] = \
scaler_list[1].transform(df2[['Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS']]) #
# scale the values according to the estimator of Phone 2
common_APs = pd.merge(df1['BSSID'], df2['BSSID'], how='inner') # merge the dataframes
common_APs = common_APs.BSSID.unique() # obtain intersecting BSSIDs
randomAPs = random.sample(range(0, len(common_APs)), 10) # randomly select ten intersecting BSSIDs
for j in range(len(df1)): # iterate over all rows of the dataframe of Phone 1
for k in range(len(randomAPs)): # iterate over all randomly selected BSSIDs
if df1.BSSID[j] == common_APs[k]: # if a match is found assign a unique color to it and
# plot the data associated with it (i.e., Channel, Width, Center_Frequency_0, Center_Frequency_1,
# Band, and RSS)
if df1.BSSID[j] == common_APs[0]:
pcolor = 'r'
lstyle = (0, (5, 19))
elif df1.BSSID[j] == common_APs[1]:
pcolor = 'b'
lstyle = (0, (5, 17))
elif df1.BSSID[j] == common_APs[2]:
pcolor = 'g'
lstyle = (0, (5, 15))
elif df1.BSSID[j] == common_APs[3]:
pcolor = 'c'
lstyle = (0, (5, 13))
elif df1.BSSID[j] == common_APs[4]:
pcolor = 'm'
lstyle = (0, (5, 11))
elif df1.BSSID[j] == common_APs[5]:
pcolor = 'y'
lstyle = (0, (5, 9))
elif df1.BSSID[j] == common_APs[6]:
pcolor = 'k'
lstyle = (0, (5, 7))
elif df1.BSSID[j] == common_APs[7]:
pcolor = 'tab:brown'
lstyle = (0, (5, 5))
elif df1.BSSID[j] == common_APs[8]:
pcolor = 'tab:orange'
lstyle = (0, (5, 3))
elif df1.BSSID[j] == common_APs[9]:
pcolor = 'tab:gray'
lstyle = (0, (5, 1))
dataRow = df1.iloc[j, 1:]
dataRow.plot(color=pcolor, ax=axs[0, i], grid=True, alpha=1, legend=True, linestyle=lstyle, lw=1)
legend_elements = [Line2D([0], [0], color='r', lw=1, label=str(common_APs[0])),
Line2D([0], [0], color='b', lw=1, label=str(common_APs[1])),
Line2D([0], [0], color='g', lw=1, label=str(common_APs[2])),
Line2D([0], [0], color='c', lw=1, label=str(common_APs[3])),
Line2D([0], [0], color='m', lw=1, label=str(common_APs[4])),
Line2D([0], [0], color='y', lw=1, label=str(common_APs[5])),
Line2D([0], [0], color='k', lw=1, label=str(common_APs[6])),
Line2D([0], [0], color='tab:brown', lw=1, label=str(common_APs[7])),
Line2D([0], [0], color='tab:orange', lw=1, label=str(common_APs[8])),
Line2D([0], [0], color='tab:gray', lw=1, label=str(common_APs[9]))]
axs[0, i].legend(handles=legend_elements, title='BSSID:', title_fontsize=5)
axs[0, i].set_title('RP_' + str(randomRP[i]))
for j in range(len(df2)): # iterate over all rows of the dataframe of Phone 2
for k in range(len(randomAPs)): # iterate over all randomly selected BSSIDs
if df2.BSSID[j] == common_APs[k]: # if a match is found assign a unique color to it and
# plot the data associated with it (i.e., Channel, Width, Center_Frequency_0, Center_Frequency_1,
# Band, and RSS)
if df2.BSSID[j] == common_APs[0]:
pcolor = 'r'
lstyle = (0, (5, 19))
elif df2.BSSID[j] == common_APs[1]:
pcolor = 'b'
lstyle = (0, (5, 17))
elif df2.BSSID[j] == common_APs[2]:
pcolor = 'g'
lstyle = (0, (5, 15))
elif df2.BSSID[j] == common_APs[3]:
pcolor = 'c'
lstyle = (0, (5, 13))
elif df2.BSSID[j] == common_APs[4]:
pcolor = 'm'
lstyle = (0, (5, 11))
elif df2.BSSID[j] == common_APs[5]:
pcolor = 'y'
lstyle = (0, (5, 9))
elif df2.BSSID[j] == common_APs[6]:
pcolor = 'k'
lstyle = (0, (5, 7))
elif df2.BSSID[j] == common_APs[7]:
pcolor = 'tab:brown'
lstyle = (0, (5, 5))
elif df2.BSSID[j] == common_APs[8]:
pcolor = 'tab:orange'
lstyle = (0, (5, 3))
elif df2.BSSID[j] == common_APs[9]:
pcolor = 'tab:gray'
lstyle = (0, (5, 1))
dataRow = df2.iloc[j, 1:]
dataRow.plot(color=pcolor, ax=axs[1, i], grid=True, alpha=1, linestyle=lstyle, lw=1)
for ax in fig.axes:
plt.sca(ax)
plt.xticks(rotation=20, ha='right')
x = np.arange(6)
for i in range(2):
for j in range(4):
axs[i, j].set_xticks(x)
axs[0, j].set_xticklabels(('', '', '', '', '', ''))
axs[1, j].set_xticklabels(('Channel', 'Width', 'Center_Frequency_0', 'Center_Frequency_1', 'Band', 'RSS'))
plt.tight_layout()
plt.show()
def Validity2_Bluetooth():
# ---------------------- Setting up the layout of the plot ---------------------- #
mpl.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('legend', fontsize=5)
plt.rc('axes', titlesize=7)
plt.rc('figure', titlesize=9)
fig, axs = plt.subplots(2, 4, figsize=(10, 5))
fig.suptitle('Bluetooth')
axs[0, 0].set_ylabel('Phone1', rotation=0, fontsize=7, labelpad=20)
axs[1, 0].set_ylabel('Phone2', rotation=0, fontsize=7, labelpad=20)
y = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
x = np.arange(2)
twinax = {}
for i in range(2):
for j in range(4):
axs[i, j].set_yticks(y)
axs[i, j].set_ylim(0, 1)
axs[i, j].set_xticks(x)
axs[1, j].set_xticklabels(('RSS', 'Protocol'))
twinax[i, j] = axs[i, j].twinx()
twinax[i, j].set_yticks([0, 0.5, 1])
twinax[i, j].set_yticklabels(('', '', ''))
twinax[i, 3].set_yticklabels(('CLASSIC', 'DUAL', 'BLE'))
# ------------------------------------------------------------------------------- #
df_all = pd.DataFrame() # initializing a dataframe to aggregate all Bluetooth .csv files for a given phone
scaler_list = [] # initializing a list to hold the Min Max estimators (i.e., one for each phone)
for i in range(2): # iterate over phones
for j in range(122): # iterate over RPs
df_temp = pd.read_csv(
'Phone' + str(i + 1) + '_Bluetooth_' + str(j + 1) + '.csv') # read the Bluetooth .csv file
df_all = df_all.append(df_temp, ignore_index=True) # append it to the df_all dataframe
df_all = df_all[['MAC_address', 'RSS', 'Protocol']] # after aggregation, leave only the columns of interest
df_all.replace({'Protocol': {'CLASSIC': 0, 'DUAL': 1, 'BLE': 2}}, inplace=True) # replace string with integer
# values for processing
scaler = MinMaxScaler(feature_range=(0, 1)) # use Min Max estimator to scale features between 0 and 1
scaler.fit(df_all[['RSS', 'Protocol']]) # fit the estimator to the columns of interest
scaler_list.append(scaler) # append the estimator to the list to be used for the respective phone
randomRP = random.sample(range(1, 122), 4) # randomly select 4 RPs
for i in range(len(randomRP)): # iterate over the selected RPs
df1 = pd.read_csv(
'Phone1_Bluetooth_' + str(randomRP[i]) + '.csv') # read the Bluetooth .csv file corresponding to
# the RP for Phone 1
df1 = df1[['MAC_address', 'RSS', 'Protocol']] # leave only the columns of interest
df1.replace({'Protocol': {'CLASSIC': 0, 'DUAL': 1, 'BLE': 2}},
inplace=True) # replace string with integer values
# for processing
df1[['RSS', 'Protocol']] = scaler_list[0].transform(
df1[['RSS', 'Protocol']]) # scale the values according to the
# estimator of Phone 1
df2 = pd.read_csv(
'Phone2_Bluetooth_' + str(randomRP[i]) + '.csv') # read the Bluetooth .csv file corresponding to the
# RP for Phone 2
df2 = df2[['MAC_address', 'RSS', 'Protocol']] # leave only the columns of interest
df2.replace({'Protocol': {'CLASSIC': 0, 'DUAL': 1, 'BLE': 2}},
inplace=True) # replace string with integer values
# for processing
df2[['RSS', 'Protocol']] = scaler_list[1].transform(
df2[['RSS', 'Protocol']]) # scale the values according to the
# estimator of Phone 2
common_devices = pd.merge(df1['MAC_address'], df2['MAC_address'], how='inner') # merge the dataframes
common_devices = common_devices.MAC_address.unique() # obtain intersecting MAC addresses
if len(common_devices) < 10: # print an error message in case intersecting MAC addresses is less than ten
print('common devices for RP',randomRP[i],'is less than ten, please execute again!')
random_devices = random.sample(range(0, len(common_devices)),
10) # randomly select ten intersecting MAC addresses
for j in range(len(df1)): # iterate over all rows of the dataframe of Phone 1
for k in range(len(random_devices)): # iterate over all randomly selected MAC addresses
if df1.MAC_address[j] == common_devices[k]: # if a match is found assign a
# unique color to it and plot the data associated with it (i.e., RSS and Protocol type)
if df1.MAC_address[j] == common_devices[0]:
pcolor = 'r'
lstyle = (0, (5, 19))
elif df1.MAC_address[j] == common_devices[1]:
pcolor = 'b'
lstyle = (0, (5, 17))
elif df1.MAC_address[j] == common_devices[2]:
pcolor = 'g'
lstyle = (0, (5, 15))
elif df1.MAC_address[j] == common_devices[3]:
pcolor = 'c'
lstyle = (0, (5, 13))
elif df1.MAC_address[j] == common_devices[4]:
pcolor = 'm'
lstyle = (0, (5, 11))
elif df1.MAC_address[j] == common_devices[5]:
pcolor = 'y'
lstyle = (0, (5, 9))
elif df1.MAC_address[j] == common_devices[6]:
pcolor = 'k'
lstyle = (0, (5, 7))
elif df1.MAC_address[j] == common_devices[7]:
pcolor = 'tab:brown'
lstyle = (0, (5, 5))
elif df1.MAC_address[j] == common_devices[8]:
pcolor = 'tab:orange'
lstyle = (0, (5, 3))
elif df1.MAC_address[j] == common_devices[9]:
pcolor = 'tab:gray'
lstyle = (0, (5, 1))
dataRow = df1.iloc[j, 1:]
dataRow.plot(color=pcolor, linestyle=lstyle, ax=axs[0, i], grid=True, alpha=1, legend=True, lw=1)
legend_elements = [
Line2D([0], [0], color='r', lw=1, label=str(common_devices[0])),
Line2D([0], [0], color='b', lw=1, label=str(common_devices[1])),
Line2D([0], [0], color='g', lw=1, label=str(common_devices[2])),
Line2D([0], [0], color='c', lw=1, label=str(common_devices[3])),
Line2D([0], [0], color='m', lw=1, label=str(common_devices[4])),
Line2D([0], [0], color='y', lw=1, label=str(common_devices[5])),
Line2D([0], [0], color='k', lw=1, label=str(common_devices[6])),
Line2D([0], [0], color='tab:brown', lw=1, label=str(common_devices[7])),
Line2D([0], [0], color='tab:orange', lw=1,
label=str(common_devices[8])),
Line2D([0], [0], color='tab:gray', lw=1, label=str(common_devices[9]))]
axs[0, i].legend(handles=legend_elements, title='MAC_address:', title_fontsize=5)
axs[0, i].set_title('RP_' + str(randomRP[i]))
for j in range(len(df2)): # iterate over all rows of the dataframe of Phone 2
for k in range(len(random_devices)): # iterate over all randomly selected MAC addresses
if df2.MAC_address[j] == common_devices[k]: # if a match is found assign a
# unique color to it and plot the data associated with it (i.e., RSS and Protocol type)
if df2.MAC_address[j] == common_devices[0]:
pcolor = 'r'
lstyle = (0, (5, 19))
elif df2.MAC_address[j] == common_devices[1]:
pcolor = 'b'
lstyle = (0, (5, 17))
elif df2.MAC_address[j] == common_devices[2]:
pcolor = 'g'
lstyle = (0, (5, 15))
elif df2.MAC_address[j] == common_devices[3]:
pcolor = 'c'
lstyle = (0, (5, 13))
elif df2.MAC_address[j] == common_devices[4]:
pcolor = 'm'
lstyle = (0, (5, 11))
elif df2.MAC_address[j] == common_devices[5]:
pcolor = 'y'
lstyle = (0, (5, 9))
elif df2.MAC_address[j] == common_devices[6]:
pcolor = 'k'
lstyle = (0, (5, 7))
elif df2.MAC_address[j] == common_devices[7]:
pcolor = 'tab:brown'
lstyle = (0, (5, 5))
elif df2.MAC_address[j] == common_devices[8]:
pcolor = 'tab:orange'
lstyle = (0, (5, 3))
elif df2.MAC_address[j] == common_devices[9]:
pcolor = 'tab:gray'
lstyle = (0, (5, 1))
dataRow = df2.iloc[j, 1:]
dataRow.plot(color=pcolor, linestyle=lstyle, ax=axs[1, i], grid=True, alpha=1, lw=1)
for ax in fig.axes:
plt.sca(ax)
plt.xticks(rotation=20, ha='right')
for ax in axs.flat:
ax.label_outer()
plt.tight_layout()
plt.show()
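# ------------------------- Illustrative sketch (not part of the original script) ------------------------ #
# The two if/elif chains above map the first ten common MAC addresses to fixed colors and dash patterns.
# A minimal, hypothetical alternative is a lookup table; the names _DEVICE_STYLES and device_style are
# introduced here only for illustration and are assumptions, not part of the original code.
_DEVICE_STYLES = [('r', (0, (5, 19))), ('b', (0, (5, 17))), ('g', (0, (5, 15))), ('c', (0, (5, 13))),
                  ('m', (0, (5, 11))), ('y', (0, (5, 9))), ('k', (0, (5, 7))), ('tab:brown', (0, (5, 5))),
                  ('tab:orange', (0, (5, 3))), ('tab:gray', (0, (5, 1)))]
def device_style(mac, common_devices):
    # return the (color, linestyle) pair assigned to one of the ten plotted common devices, or None otherwise
    for idx, device in enumerate(common_devices[:len(_DEVICE_STYLES)]):
        if mac == device:
            return _DEVICE_STYLES[idx]
    return None
# usage sketch: style = device_style(df1.MAC_address[j], common_devices); if style is not None,
# unpack it as pcolor, lstyle before calling dataRow.plot(...)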
def Validity2_Cellular():
# ---------------------------------- Setting up the layout of the plot ---------------------------------- #
mpl.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('legend', fontsize=7)
plt.rc('axes', titlesize=7)
plt.rc('figure', titlesize=9)
fig, axs = plt.subplots(2, 4, figsize=(10, 5))
fig.suptitle('Cellular')
axs[0, 0].set_ylabel('Phone1', rotation=0, fontsize=7, labelpad=20)
axs[1, 0].set_ylabel('Phone2', rotation=0, fontsize=7, labelpad=20)
# ------------------------------------------------------------------------------------------------------- #
df_all = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assertTrue(result1.equals(result4))
self.assertTrue(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assertTrue(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assertTrue(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[30:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[60:100])
self.assertIsNone(result.freq)
result = rng[:50].intersection(rng[25:75])
self.assertEqual(result.freqstr, 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assertEqual(result.freq, rng.freq)
result = rng[:50].intersection(nofreq)
self.assertEqual(result.freq, rng.freq)
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
tm.assert_isinstance(the_min, Timestamp)
tm.assert_isinstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
self.assertEqual(rng.min(), rng[0])
self.assertEqual(rng.max(), rng[-1])
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)),
'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iget(-1))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iget(0))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0),
datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
self.assertEqual(df['Date'][0], dates[0][0])
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
self.assertTrue(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
self.assertEqual(s[0], dates[0][0])
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3],
[0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
self.assertTrue(df.x1.dtype == 'M8[ns]')
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1,
weekday=3,
variation="nearest"))
self.assertEqual(dr[0], Timestamp('2013-01-31'))
self.assertEqual(dr[1], Timestamp('2014-01-30'))
class TimeConversionFormats(tm.TestCase):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [ Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301') ]
results2 = [ Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103') ]
for vals, expecteds in [ (values, (Index(results1), Index(results2))),
(Series(values),(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])) ]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
self.assertEqual(result, expected)
else:
self.assertTrue(result.equals(expected))
def test_to_datetime_format_YYYYMMDD(self):
s = Series([19801222,19801222] + [19810105]*5)
expected = Series([ Timestamp(x) for x in s.apply(str) ])
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
result = to_datetime(s.apply(str),format='%Y%m%d')
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format)
exp = dt.datetime.strptime(val, format)
self.assertEqual(result, exp)
def test_to_datetime_format_time(self):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
#['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
#['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
#['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
def test_to_datetime_format_weeks(self):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
            self.assertEqual(to_datetime(s, format=format), dt)
"""Tools helping with the TIMIT dataset.
Based on the version from:
https://www.kaggle.com/mfekadu/darpa-timit-acousticphonetic-continuous-speech
"""
import re
from os.path import join, splitext, dirname
from pathlib import Path
import numpy as np
import pandas as pd
import soundfile as sf
from audio_loader.ground_truth.challenge import Challenge
PHON = ['b', 'd', 'g', 'p', 't', 'k', 'dx', 'q', # Stops
'bcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl', # Closure
'jh', 'ch', # Affricates
's', 'sh', 'z', 'zh', 'f', 'th', 'v', 'dh', # Fricatives
'm', 'n', 'ng', 'em', 'en', 'eng', 'nx', # Nasals
'l', 'r', 'w', 'y', 'hh', 'hv', 'el', # Semivowels and Glides
'iy', 'ih', 'eh', 'ey', 'ae', 'aa', 'aw', 'ay', # Vowels
'ah', 'ao', 'oy', 'ow', 'uh', 'uw', 'ux', 'er',
'ax', 'ix', 'axr', 'ax-h',
'pau', 'h#', 'epi' # Non-speech event
]
SILENCES = ['pau', 'epi', 'h#']
CLOSURES = ['bcl', 'vcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl']
DF_PHON = pd.read_csv(join(dirname(__file__), 'timit_map.csv'), names=["original", "phon_class1", "phon_class2", "phon_class3"])
class TimitGroundTruth(Challenge):
"""Ground truth getter for TIMIT like datasets."""
def __init__(self, timit_like_root_folderpath, datapath="data", gtpath="data", gt_grouped_file=None, with_silences=True, phon_class="original", fuse_closures=True, return_original_gt=False):
"""Compatible with the TIMIT DARPA dataset available on kaggle.
To use the TIMIT DARPA dataset leave the default arguments as is.
"""
super().__init__(timit_like_root_folderpath, datapath, gtpath)
self.with_silences = with_silences
self.phon_class = phon_class
self.fuse_closures = fuse_closures
self.return_original_gt = return_original_gt
if gt_grouped_file is None:
df_train = pd.read_csv(join(self.root_folderpath, "train_data.csv"))
df_train = df_train[pd.notnull(df_train['path_from_data_dir'])]
df_test = pd.read_csv(join(self.root_folderpath, "test_data.csv"))
df_test = df_test[pd.notnull(df_test['path_from_data_dir'])]
self.df_all = df_train.append(df_test, ignore_index=True)
else:
self.df_all = pd.read_csv(join(self.root_folderpath, gt_grouped_file))
self.df_all = self.df_all[pd.notnull(self.df_all['path_from_data_dir'])]
# create the is_audio column if not present
if "is_audio" not in self.df_all.keys():
self.df_all["is_audio"] = self.df_all["path_from_data_dir"].str.match(
".*.wav",
flags=re.IGNORECASE
)
if "is_converted_audio" in self.df_all.keys():
self.df_all = self.df_all[np.logical_and(self.df_all["is_audio"],
self.df_all["is_converted_audio"])]
else:
self.df_all = self.df_all[self.df_all["is_audio"]]
if self.phon_class == "original":
self.phon2index = {phon:index for index, phon in enumerate(PHON)}
self.index2phn = PHON
self.silences = SILENCES
else:
self.index2phn = DF_PHON[self.phon_class].unique()
# put silence at last
self.index2phn = np.append(np.delete(self.index2phn, np.where(self.index2phn == "sil")), "sil")
tmp_phon2index = {phon:index for index, phon in enumerate(self.index2phn)}
# from original label to desired label
self.phon2index = {phon:tmp_phon2index[DF_PHON.loc[DF_PHON["original"] == phon][self.phon_class].values[0]] for phon in DF_PHON["original"].unique()}
self.silences = ["sil"]
self.index2speaker_id = pd.unique(self.df_all["speaker_id"])
self.speaker_id2index = {speaker_id:index for index, speaker_id in enumerate(self.index2speaker_id)}
self.dict_gt = get_dict_gt(join(self.root_folderpath, self.gtpath), self.df_all)
self.set_gt_format()
@property
def number_of_speakers(self):
"""Return the number of speakers in the Timit challenge."""
return len(self.index2speaker_id)
@property
def training_set(self):
"""Return array of filepaths from training test."""
return self.df_all[self.df_all["test_or_train"] == "TRAIN"]["path_from_data_dir"].values
@property
def testing_set(self):
"""Return array of filepaths from testing test."""
return self.df_all[self.df_all["test_or_train"] == "TEST"]["path_from_data_dir"].values
@property
def gt_size(self):
"""Return the size of the ground_truth."""
size = 0
if self.phonetic:
size += self.phon_size
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
size += 1
return size
@property
def phon_size(self):
if self.with_silences:
return len(self.index2phn)
return len(self.index2phn) - len(self.silences)
@property
def speaker_id_size(self):
return 1
def get_phonem_from(self, index):
"""Return the phoneme corresponding to the given index."""
return self.index2phn[index]
def get_index_from(self, phn):
"""Return the index corresponding to the given phoneme."""
return self.phon2index[phn]
def set_gt_format(self, phonetic=True, word=False, speaker_id=False):
"""Select the ground truth to show"""
self.phonetic = phonetic
self.word = word
self.speaker_id = speaker_id
def get_samples_time_in(self, filepath):
"""Return a list of tuples corresponding to the start and end times of each sample.
Parameters:
-----------
filepath: str
Filepath of the audio file we want to get the ground truth times.
"""
audio_id = self.get_id(filepath)
df_file, speaker_id = self.dict_gt[audio_id]
res_list = []
for row in df_file.iterrows():
res_list.append((row[1][0], row[1][1]))
return res_list
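    # Example of the returned structure (added for illustration, not in the
    # original file): the tuples come straight from the .PHN annotation rows,
    # i.e. start/end positions of each phoneme segment, typically expressed in
    # audio samples, e.g. [(0, 9640), (9640, 11240), ...] for one utterance.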
def get_gt_for(self, filepath):
"""Get tuples corresponding to the start, end times of each sample and
the ground truth expected.
Parameters:
-----------
filepath: str
Filepath of the audio file we want to get the ground truth.
"""
audio_id = self.get_id(filepath)
df_file, speaker_id = self.dict_gt[audio_id]
ys = np.zeros((len(df_file.index), self.gt_size))
res_list = []
i = 0
if self.fuse_closures:
previous_label = None
previous_sample_begin = None
for row in df_file.iterrows():
sample_begin, sample_end = row[1][0], row[1][1]
self._fill_output(audio_id, sample_begin, sample_end, ys[i])
# other way to get gt label
gt_label = row[1][2]
if gt_label in CLOSURES:
previous_label = gt_label
previous_sample_begin = sample_begin
else:
if previous_label is not None and previous_label[0] == gt_label:
sample_begin = previous_sample_begin
if self.with_silences or np.sum(ys[i]) > 0:
if self.return_original_gt:
res_list.append((sample_begin, sample_end, (ys[i], gt_label)))
else:
res_list.append((sample_begin, sample_end, ys[i]))
previous_label = None
previous_sample_begin = None
i += 1
else:
for row in df_file.iterrows():
sample_begin, sample_end = row[1][0], row[1][1]
self._fill_output(audio_id, sample_begin, sample_end, ys[i])
if self.with_silences or np.sum(ys[i]) > 0:
res_list.append((sample_begin, sample_end, ys[i]))
i += 1
return res_list
def _fill_output(self, id_audio, sample_begin, sample_end, output):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
id of the audio file
sample_begin: integer > 0
sample_end: integer > 0
output: np.array
Array to fill with ground truth (supposed zeros).
"""
if self.phonetic:
self._fill_phon_output(id_audio, sample_begin, sample_end, output[:self.phon_size])
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
output[-self.speaker_id_size] = self._get_speaker_id(id_audio)
def get_majority_gt_at_sample(self, filepath, sample_begin, sample_end):
"""Return an integer that represent the majority class for a specific sample."""
output = []
if self.phonetic:
output += self._phon_majority(self.get_id(filepath), sample_begin, sample_end)
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
output += self._get_speaker_id(self.get_id(filepath))
return output
def get_output_description(self):
"""Return a list that describe the output."""
output = []
if self.phonetic:
output += PHON
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
output += "Speaker Id"
return output
def _phon_majority(self, id_audio, sample_begin, sample_end):
df_file, speaker_id = self.dict_gt[id_audio]
df_corresponding_time = df_file[np.logical_and(df_file["start_time"] < sample_end,
df_file["end_time"] >= sample_begin)]
if len(df_corresponding_time) > 1:
raise Exception("phon majority does not handle multiple labels")
return df_corresponding_time["phn"].values
def _fill_phon_output(self, id_audio, sample_begin, sample_end, output):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
Id of the audio file.
sample_begin: integer > 0
sample_end: integer > 0
output: np.array
Array to modify/fill with ground truth.
"""
df_file, speaker_id = self.dict_gt[id_audio]
df_corresponding_time = df_file[np.logical_and(df_file["start_time"] <= sample_end,
df_file["end_time"] >= sample_begin)]
total_samples = sample_end - sample_begin
for row in df_corresponding_time.iterrows():
start_frame = max(row[1][0], sample_begin)
end_frame = min(row[1][1], sample_end)
if self.with_silences or self.phon2index[row[1][2]] < len(output):
output[self.phon2index[row[1][2]]] += (end_frame - start_frame) / total_samples
def _get_speaker_id(self, id_audio):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
Id of the audio file.
output: np.array
Array to modify/fill with ground truth.
"""
_, speaker_id = self.dict_gt[id_audio]
return self.speaker_id2index[speaker_id]
def get_dict_gt(gt_folderpath, df_data):
"""Get dataframe corresponding to the gt."""
if gt_folderpath[-1] != "/":
gt_folderpath += "/"
dic = {}
for filename in Path(gt_folderpath).glob('**/*.PHN'):
id_fn = splitext(str(filename).replace(gt_folderpath, ""))[0]
speaker_id = df_data[df_data["path_from_data_dir"].str.contains(id_fn, regex=False)]["speaker_id"].iloc[0]
        df_file = pd.read_csv(filename, names=["start_time", "end_time", "phn"], delimiter=" ")
        # completion of the truncated loop body: map the audio id to its
        # (phoneme dataframe, speaker id) pair, as expected by TimitGroundTruth
        dic[id_fn] = (df_file, speaker_id)
    return dic
#-------------------------------------------------------------------------------
# Author: hjiang
# Email: <EMAIL>
# Time: Thu Nov 25 14:18:20 2021
#-------------------------------------------------------------------------------
from clickhouse_driver import Client
from datetime import datetime, timedelta
from multiprocessing import Pool
from DataPlatForm.configs import *
import numpy as np
import pandas as pd
import re
import time
import os
#------------------------------------------------------------------------------
# Check whether a list is homogeneous (all of its elements have the same type)
#------------------------------------------------------------------------------
def is_homogeneous_list(lst):
if len(lst) == 0:
return True
return all(isinstance(x, type(lst[0])) for x in lst[1:])
#------------------------------------------------------------------------------
# Build the list of dates between start_date and end_date (both inclusive)
#------------------------------------------------------------------------------
def gen_date_list(start_date, end_date):
sdate = datetime.strptime(start_date, "%Y-%m-%d")
edate = datetime.strptime(end_date, "%Y-%m-%d")
timestamp_list = list(pd.date_range(sdate, edate, freq='d'))
return [x.date().strftime("%Y-%m-%d") for x in timestamp_list]
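# Example (added for illustration, not part of the original module): both
# endpoints are included, so a three-day span yields three date strings.
#   gen_date_list("2021-11-01", "2021-11-03")
#   -> ['2021-11-01', '2021-11-02', '2021-11-03']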
#------------------------------------------------------------------------------
# Extract the column names of a 2-D table, excluding the indexer column
#------------------------------------------------------------------------------
def get_2d_table_columns_list(client, database, table):
if is_table_exist(client, database, table):
desc_query = "desc " + database + "." + table
desc_res = client.execute(desc_query)
return [x[0] for x in desc_res[1:]]
else:
return []
#------------------------------------------------------------------------------
# Build the column clause of a ClickHouse INSERT statement from a DataFrame
#------------------------------------------------------------------------------
def gen_columns_str_from_dataframe(frame):
columns_str = "(" + "indexer,"
for x in list(frame.columns.values):
columns_str += x + ","
columns_str = columns_str[0:-1]
columns_str += ")"
return columns_str
#------------------------------------------------------------------------------
# Build the ClickHouse table field definitions from a DataFrame's dtypes
#------------------------------------------------------------------------------
def gen_table_fields_infos_from_dataframe(frame):
columns = list(frame.columns.values)
columns_types = [x for x in list(frame.dtypes)]
index_type = str(frame.index.dtype)
res = list(zip(columns, [str(x) for x in columns_types]))
res.insert(0, ('indexer', index_type))
table_fields_str = ""
for x in res:
field = x[0]
field_type = x[1]
if field_type == 'object':
table_fields_str += str(field) + " " + "String" + ", "
if "datetime" in field_type:
table_fields_str += str(field) + " " + "Datetime" + ", "
if field_type == 'float64':
table_fields_str += str(field) + " " + "Float64" + ", "
if field_type == 'float32':
table_fields_str += str(field) + " " + "Float32" + ", "
if field_type == 'int64':
table_fields_str += str(field) + " " + "Int64" + ", "
if field_type == 'int32':
table_fields_str += str(field) + " " + "Int32" + ", "
table_fields_str = table_fields_str[0:-2]
return table_fields_str
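# Example (added for illustration; the frame below is a made-up sample): for a
# DataFrame indexed by symbol strings with one float and one int column,
#   frame = pd.DataFrame({"close": [10.5], "volume": [100]}, index=["AAPL"])
#   gen_columns_str_from_dataframe(frame)        -> "(indexer,close,volume)"
#   gen_table_fields_infos_from_dataframe(frame) -> "indexer String, close Float64, volume Int64"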
#------------------------------------------------------------------------------
# Check that a date string matches the YYYY-MM-DD format
#------------------------------------------------------------------------------
def check_date_string(date_str):
pattern = '^\d{4}-\d{2}-\d{2}$'
return re.match(pattern, date_str)
def trans_date_string(date_str):
pattern = '^(\d{4})-(\d{2})-(\d{2})$'
res = re.search(pattern, date_str)
return res.group(1) + "_" + res.group(2) + "_" + res.group(3)
def recover_date_string(date_str):
pattern = '^(\d{4})_(\d{2})_(\d{2})$'
res = re.search(pattern, date_str)
return res.group(1) + "-" + res.group(2) + "-" + res.group(3)
#-------------------------------------------------------------------------------
# Check whether a given user exists
#-------------------------------------------------------------------------------
def is_user_exist(client, user_name):
all_users = client.execute("SHOW USERS")
for u in all_users:
if u[0] == user_name:
return True
return False
#------------------------------------------------------------------------------
# Check whether a database exists
#------------------------------------------------------------------------------
def is_database_exist(client, database_name):
res = client.execute("show databases")
for tp in res:
if tp[0] == database_name:
return True
return False
#------------------------------------------------------------------------------
# Check whether a given factor data table exists
# (test_passed)
#------------------------------------------------------------------------------
def is_table_exist(client, database_name, table_name):
    show_query = "show tables from " + database_name + " like " + "\'%" + table_name + "%\'"
    # only an exact name match counts; otherwise report the table as missing
    for tp in client.execute(show_query):
        if tp[0] == table_name:
            return True
    return False
#------------------------------------------------------------------------------
# Get the list of all tables in a database
#------------------------------------------------------------------------------
def get_all_tables_in_database(client, database_name):
show_query = "show tables from " + database_name
return [x[0] for x in client.execute(show_query)]
#------------------------------------------------------------------------------
# Check whether data for a given date already exists in the table
# (test_passed)
#------------------------------------------------------------------------------
def is_date_already_exist(client, database_name, table_name, date):
select_query = "select timestamp from " + database_name + "." + table_name + " where timestamp = " + "\'"+ date + "\'"
res = client.execute(select_query)
if len(res) > 0:
return True
return False
#------------------------------------------------------------------------------
# Get the timestamp of the first row in a factor data table
#------------------------------------------------------------------------------
def get_first_row_timestamp(client, database, table):
select_query = "select timestamp from " + database + "." + table
res = client.execute(select_query)
timestamps = [x[0] for x in res]
timestamps.sort()
if len(timestamps) > 0:
return timestamps[0]
else:
return ""
#------------------------------------------------------------------------------
# Get the timestamp of the last row in a factor data table
#------------------------------------------------------------------------------
def get_last_row_timestamp(client, database, table):
select_query = "select timestamp from " + database + "." + table
res = client.execute(select_query)
timestamps = [x[0] for x in res]
timestamps.sort()
if len(timestamps) > 0:
return timestamps[-1]
else:
return ""
#------------------------------------------------------------------------------
# Get a list of all timestamps in a factor data table
#------------------------------------------------------------------------------
def get_all_row_timestamps(client, database, table):
select_query = "select timestamp from " + database + "." + table
res = client.execute(select_query)
return [x[0] for x in res]
#------------------------------------------------------------------------------
# Generate a ClickHouse insert statement
#------------------------------------------------------------------------------
def generate_insert_query(database, table, columns_str, data_str):
return "insert into " + database + "." + table + " " + columns_str + " values " + data_str
#------------------------------------------------------------------------------
# Validate the dates and the date range
#------------------------------------------------------------------------------
def check_date_range(start_date, end_date):
    # Check that both dates are valid
    if not check_date_string(start_date):
        raise ValueError("start_date[%s] syntax error! Expected yyyy-mm-dd." % start_date)
    if not check_date_string(end_date):
        raise ValueError("end_date[%s] syntax error! Expected yyyy-mm-dd." % end_date)
if start_date > end_date:
raise ValueError("start_date[%s] > end_date[%s], error!" % (start_date, end_date))
#------------------------------------------------------------------------------
# Generate a factor dataframe for testing
#------------------------------------------------------------------------------
def gen_factor_dataframe(symbols, start_date, daynum):
dt_start = datetime.strptime(start_date, "%Y-%m-%d")
date_list = []
for i in range(daynum):
dt_curr = dt_start + timedelta(days = i)
date_list.append(dt_curr.strftime("%Y-%m-%d"))
data = np.random.rand(len(symbols), len(date_list))
return | pd.DataFrame(data, index=symbols, columns=date_list) | pandas.DataFrame |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def drop_non_existent(data):
non_existent = ((data.isnull()) | (data==0)).all()
data = data.loc[:,~non_existent]
return data
def resample_time_series(data, time_resolution):
# Determine (maximum) time resolution of data
    # (large distances between time stamps occur due to missing data)
data = data.sort_index()
data_time_resol = np.min(data.index[1:]-data.index[:-1])
# Upsample or downsample to obtain "time_resol" resolution
if time_resolution < data_time_resol:
ffill_limit = data_time_resol // time_resolution -1
data = data.resample(time_resolution).ffill(ffill_limit)
if time_resolution > data_time_resol:
data = data.resample(time_resolution).mean()
return data
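# Minimal usage sketch for resample_time_series (illustrative data; assumes a
# DatetimeIndex and a pandas Timedelta as the target resolution):
#   raw = pd.DataFrame({'load': np.random.rand(96)},
#                      index=pd.date_range('2020-01-01', periods=96, freq='15min'))
#   hourly = resample_time_series(raw, pd.Timedelta(hours=1))    # downsample by mean
#   fine = resample_time_series(raw, pd.Timedelta(minutes=5))    # upsample with limited ffill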
def save_region_variable_contrib(contribution, region, path_to_hdf_file, path_to_doc_folder):
contrib_info = pd.DataFrame(columns=['region','variable','nan_ratio','number_neg_vals', 'mean'])
# Apply a final correction to the GB data
# (gen_other and gen_biomass were apparently split into two data series at some point)
if region=='GB BZN' and ('gen_other' in contribution.columns):
corrected_gen_other = contribution.gen_other.add(contribution.gen_biomass, fill_value=0)
contribution.loc[:, 'gen_other'] = corrected_gen_other
contribution.loc[:, 'gen_biomass'] = 0
# Drop non-existent columns
contribution = drop_non_existent(contribution)
    # Save (region, variable)-contributions to separate files and document download results
if not contribution.empty:
contribution.sort_index(inplace=True)
for var in contribution.columns:
contrib_name = '{}_{}'.format(region.replace('-', '_').replace(' ','_').replace('(','_').replace(')','_'),
var)
contribution.loc[:,var].to_hdf(path_to_hdf_file, key=contrib_name, mode='a', complevel=9)
# Extract infos, e.g. nan-ratio
contrib_info.loc[contrib_name,'nan_ratio'] = contribution.loc[:,var].isnull().sum()/contribution.shape[0]
contrib_info.loc[contrib_name, 'variable'] = var
contrib_info.loc[contrib_name, 'region'] = region
contrib_info.loc[contrib_name, 'number_neg_vals'] = (contribution.loc[:,var]<0).sum()
contrib_info.loc[contrib_name, 'mean'] = contribution.loc[:,var].mean()
# Document the download results
contribution.loc[:,var].plot()
plt.savefig(path_to_doc_folder+contrib_name+'.png', dpi=200, bbox_inches='tight')
plt.close()
return contrib_info
def extract_region_variable_contrib(entsoe_data_folder, file_type, region, variable_name, pivot_column_name, column_rename_dict,
start_time, end_time, time_resolution='1H'):
time_index = pd.date_range(start_time,end_time,freq=time_resolution)
print('--------- ',region, ' ----------')
contribution = pd.DataFrame(columns=column_rename_dict.values(),
index=time_index, data=np.nan)
# Iterate over files containing different months and years
for date_index in | pd.date_range(start_time,end_time,freq='M') | pandas.date_range |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
| tm.assert_frame_equal(result, expected, by_blocks=True) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
@file:compete_pctr_yestoday.py
@time:2019/6/12 14:53
@author:Tangj
@software:Pycharm
@Desc
"""
'''
Again compute mean statistics for pctr, ecpm, bid, etc., but using the previous day's values
shifted forward one day; these shifted previous-day values are the pre-computed overall means
taken from the full logs.
'''
import pandas as pd
import numpy as np
import time
# Features such as the size of the request set and the number of distinct uids
# should be built from all log files, not only from the logs of successful exposures
name = ['track_log_20190410', 'track_log_20190411', 'track_log_20190412', 'track_log_20190413', 'track_log_20190414',
'track_log_20190415', 'track_log_20190416', 'track_log_20190417', 'track_log_20190418', 'track_log_20190419',
'track_log_20190420', 'track_log_20190421',
'track_log_20190422']
using_rate = pd.read_csv('../usingData/feature/total_ad_pctr.csv')
total_fea = pd.DataFrame()
continue_num = 0
flag = 0
for na in name:
ttt = time.time()
print(time.localtime(ttt))
nas = na.split('_')
day = nas[-1]
print(day, ' processing')
data = pd.read_table('../metaData/metaTrain/' + na + '.out', header=None)
compete = data[4].values
uid_list = data[2].values
day2 = day
if flag == 0:
day = int(day)
flag = 1
else:
day = int(day) - 1
every_day_rate = using_rate[using_rate['day'] == int(day)]
everyday_aid = every_day_rate['ad_id'].values
everyday_bid = every_day_rate['bid'].values
everyday_pctr = every_day_rate['pctr'].values
everyday_total = every_day_rate['total_ecpm'].values
everyday_quality = every_day_rate['quality_ecpm'].values
every_bid = {}
every_pctr = {}
every_total = {}
every_quality = {}
for i, k in enumerate(everyday_aid):
if k not in every_bid:
every_bid[k] = everyday_bid[i]
every_pctr[k] = everyday_pctr[i]
every_total[k] = everyday_total[i]
every_quality[k] = everyday_quality[i]
every_bid[-1] = 0.0
every_pctr[-1] = 0.0
every_total[-1] = 0.0
every_quality[-1] = 0.0
    # Extract the corresponding ad id (the first comma-separated field)
def deal(x):
xx = x.split(',')
t = xx[0]
return t
com_bid = []
com_pctr = []
com_quality = []
com_total = []
new_aid = []
def fx(x):
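        # Return -100 to mark a record as filtered out: either its second-to-last
        # comma-separated field (presumably a filter flag) equals 1, or its 4th
        # field is negative; otherwise the record string is returned unchanged.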
t = x
xx = t.split(',')
if int(xx[-2]) == 1:
t = -100
if float(xx[3]) < 0:
t = -100
return t
for i, ad_list in enumerate(compete):
        # First find the ads that were filtered out
adss = ad_list.split(';')
ads_temp = list(map(fx, adss))
temp_data = | pd.DataFrame() | pandas.DataFrame |
# encoding=utf-8
from xml.etree import ElementTree as ET
import requests
from pandas import DataFrame, concat
from pandas.io.json import json_normalize
def coerce_to_dataframe(x, method):
if 'pet' in method or 'Pet' in method:
res = media_df = opt_df = breed_df = DataFrame()
if method == 'pet.get' or method == 'pet.getRandom':
res, breed_df, opt_df, media_df = pet_find_get_coerce(x['petfinder']['pet'])
elif method == 'pet.find' or method == 'shelter.getPets':
res = media_df = opt_df = breed_df = DataFrame()
try:
if x['petfinder']['pets'] == {}:
return DataFrame()
except KeyError:
return DataFrame()
else:
if isinstance(x['petfinder']['pets']['pet'], list):
for i in x['petfinder']['pets']['pet']:
pet, breed, opt, media = pet_find_get_coerce(i)
res = res.append(pet)
breed_df = breed_df.append(breed)
opt_df = opt_df.append(opt)
media_df = media_df.append(media)
else:
res, breed_df, opt_df, media_df = pet_find_get_coerce(x['petfinder']['pets']['pet'])
breed_df.columns = ['breed' + str(col) for col in breed_df.columns]
opt_df.columns = ['status' + str(col) for col in opt_df.columns]
media_df.columns = ['photos' + str(col) for col in media_df.columns]
df = concat([res, breed_df, opt_df, media_df], axis=1)
try:
del df['breeds.breed']
del df['breeds.breed.$t']
del df['breeds.breed']
del df['media.photos.photo']
except KeyError:
pass
else:
if method == 'shelter.find' or method == 'shelter.listByBreed':
try:
df = json_normalize(x['petfinder']['shelters']['shelter'])
except (KeyError, ValueError):
df = empty_shelter_df()
elif method == 'shelter.get':
try:
df = json_normalize(x['petfinder']['shelter'])
except (KeyError, ValueError):
df = DataFrame({'shelterId': 'shelter opt-out'}, index=[0])
else:
raise ValueError('unknown API method')
df.columns = [col.replace('.$t', '') for col in df.columns]
df.columns = [col.replace('contact.', '') for col in df.columns]
df = df[df.columns[~df.columns.str.contains('options')]]
return df
def pet_find_get_coerce(x):
res = media_df = opt_df = breed_df = DataFrame()
try:
breed = DataFrame(json_normalize(x['breeds']['breed'])['$t'].to_dict(), index=[0])
except (KeyError, TypeError):
breed = DataFrame(['na'], columns=[0])
try:
media = DataFrame(json_normalize(x['media']['photos']['photo'])['$t'].to_dict(), index=[0])
except (KeyError, TypeError):
media = | DataFrame(['na'], columns=[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# 加载相关模块和库
import sys
import io
# Change the default encoding of standard output
sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
"""
This exercise uses a decision tree as the classification algorithm, so the problem can be avoided.
Here we examine the data features in order to choose the algorithm more sensibly.
Passenger class vs. survival rate.
"""
import pandas as pd  # data analysis
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
data_train = | pd.read_csv("data/train.csv") | pandas.read_csv |
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
        # Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert | conversion.pydt_to_i8(result) | pandas._libs.tslibs.conversion.pydt_to_i8 |
"""
Author: <NAME>
"""
import pandas as pd
class FRED(object):
"""
Wrapper for the data API of the FRED
"""
def fetch(self, series_id, initial_date=None, end_date=None):
"""
Grabs series from the FRED website and returns a pandas dataframe
:param series_id: string with series ID, list of strings of the series ID or dict with series ID as keys
:param initial_date: string in the format 'yyyy-mm-dd' (optional)
:param end_date: string in the format 'yyyy-mm-dd' (optional)
        :return: pandas DataFrame with the requested series. If a dict is passed as series ID, the dict values are used
as column names.
"""
if type(series_id) is list:
df = pd.DataFrame()
for cod in series_id:
single_series = self._fetch_single_code(cod)
df = pd.concat([df, single_series], axis=1)
df.sort_index(inplace=True)
elif type(series_id) is dict:
df = pd.DataFrame()
for cod in series_id.keys():
single_series = self._fetch_single_code(cod)
df = pd.concat([df, single_series], axis=1)
df.columns = series_id.values()
else:
df = self._fetch_single_code(series_id)
df = self._correct_dates(df, initial_date, end_date)
return df
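    # Usage sketch (the series IDs below are examples that exist on FRED at the
    # time of writing; network access is required):
    #   fred = FRED()
    #   gdp = fred.fetch('GDPC1', initial_date='2000-01-01', end_date='2020-12-31')
    #   named = fred.fetch({'GDPC1': 'real_gdp', 'CPIAUCSL': 'cpi'})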
@staticmethod
def _fetch_single_code(series_id):
url = r'https://fred.stlouisfed.org/data/' + series_id + '.txt'
df = pd.read_csv(url, sep='\n')
        series_start = df[df[df.columns[0]].str.contains(r'DATE\s+VALUE')].index[0] + 1
df = df.loc[series_start:]
df = df[df.columns[0]].str.split('\s+', expand=True)
df = df[~(df[1] == '.')]
df = pd.DataFrame(data=df[1].values.astype(float),
index= | pd.to_datetime(df[0]) | pandas.to_datetime |
from io import StringIO
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from astropy.time import Time
from astropy.utils.data import download_file
# Endpoint to obtain ephemerides from JPL/Horizons
HORIZONS_URL = ("https://ssd.jpl.nasa.gov/horizons_batch.cgi?"
"batch=1&COMMAND=%27{target}%27&MAKE_EPHEM=%27YES%27%20&"
"CENTER=%27500%27&TABLE_TYPE=%27OBSERVER%27&"
"START_TIME=%27{start}%27&STOP_TIME=%27{stop}%27&"
"STEP_SIZE=%27{step_size}%27%20&ANG_FORMAT=%27DEG%27&"
"QUANTITIES=%2719,20,23%27&CSV_FORMAT=%27YES%27""")
class EphemFailure(Exception):
# JPL/Horizons ephemerides could not be retrieved
pass
def jpl2pandas(path):
"""Converts a csv ephemeris file from JPL/Horizons into a DataFrame.
Parameters
----------
path : str
Must be in JPL/Horizons' CSV-like format.
Returns
-------
ephemeris : `pandas.DataFrame` object
"""
jpl = open(path).readlines()
csv_started = False
csv = StringIO()
for idx, line in enumerate(jpl):
if line.startswith("$$EOE"): # "End of ephemerides"
break
if csv_started:
csv.write(line)
if line.startswith("$$SOE"): # "Start of ephemerides"
csv.write(jpl[idx - 2]) # Header line
csv_started = True
if len(csv.getvalue()) < 1:
jpl_output = "\n".join([line
for line in jpl])
msg = jpl_output
msg += ("Uhoh, something went wrong! "
"Most likely, JPL/Horizons did not recognize the target."
" Check their response above to understand why.")
raise EphemFailure(msg)
csv.seek(0)
df = pd.read_csv(csv)
# Simplify column names for user-friendlyness;
# 'APmag' is the apparent magnitude which is returned for asteroids;
# 'Tmag' is the total magnitude returned for comets:
df.index.name = 'date'
df = df.rename(columns={' Date__(UT)__HR:MN': "date",
' r': 'r',
' delta': 'delta',
' S-O-T': 'elongation'})
df['date'] = | pd.to_datetime(df.date) | pandas.to_datetime |
import os
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class CSVLog:
def __init__(self, file, columns=None, autoflush=True, verbose=False, overwrite=False):
self.file = file
self.autoflush = autoflush
self.verbose = verbose
if os.path.exists(file) and not overwrite:
self.tell('Loading existing file from {}'.format(file))
self.df = pd.read_csv(file, sep='\t')
self.columns = sorted(self.df.columns.values.tolist())
else:
self.tell('File {} does not exist or overwrite=True. Creating new frame.'.format(file))
assert columns is not None, 'columns cannot be None'
self.columns = sorted(columns)
dir = os.path.dirname(self.file)
if dir and not os.path.exists(dir): os.makedirs(dir)
self.df = | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import os
import pandas as pd
import pandas.testing as tm
from fastparquet import ParquetFile
from fastparquet import write, parquet_thrift
from fastparquet import writer, encoding
from pandas.testing import assert_frame_equal
from pandas.api.types import CategoricalDtype
import pytest
from fastparquet.util import default_mkdirs
from .util import s3, tempdir, sql, TEST_DATA
from fastparquet import cencoding
def test_uvarint():
values = np.random.randint(0, 15000, size=100)
buf = np.zeros(30, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for v in values:
o.seek(0)
cencoding.encode_unsigned_varint(v, o)
o.seek(0)
out = cencoding.read_unsigned_var_int(o)
assert v == out
def test_bitpack():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
width = cencoding.width_from_max_int(values.max())
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
cencoding.encode_bitpacked(values, width, o)
o.seek(0)
head = cencoding.read_unsigned_var_int(o)
buf2 = np.zeros(300, dtype=np.int32)
out = cencoding.NumpyIO(buf2.view("uint8"))
cencoding.read_bitpacked(o, head, width, out)
assert (values == buf2[:len(values)]).all()
assert buf2[len(values):].sum() == 0 # zero padding
assert out.tell() // 8 - len(values) < 8
def test_length():
lengths = np.random.randint(0, 15000, size=100)
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for l in lengths:
o.seek(0)
o.write_int(l)
o.seek(0)
out = buf.view('int32')[0]
assert l == out
def test_rle_bp():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
buf = np.empty(len(values) + 5, dtype=np.int32)
out = cencoding.NumpyIO(buf.view('uint8'))
buf2 = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf2)
width = cencoding.width_from_max_int(values.max())
# without length
cencoding.encode_rle_bp(values, width, o)
l = o.tell()
o.seek(0)
cencoding.read_rle_bit_packed_hybrid(o, width, length=l, o=out)
assert (buf[:len(values)] == values).all()
def test_roundtrip_s3(s3):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data.loc[100, 'f'] = np.nan
data['cat'] = data.hello.astype('category')
noop = lambda x: True
myopen = s3.open
write(TEST_DATA+'/temp_parq', data, file_scheme='hive',
row_group_offsets=[0, 500], open_with=myopen, mkdirs=noop)
myopen = s3.open
pf = ParquetFile(TEST_DATA+'/temp_parq', open_with=myopen)
df = pf.to_pandas(categories=['cat', 'bcat'])
for col in data:
assert (df[col] == data[col])[~df[col].isnull()].all()
@pytest.mark.parametrize('scheme', ['simple', 'hive'])
@pytest.mark.parametrize('row_groups', [[0], [0, 500]])
@pytest.mark.parametrize('comp', ['SNAPPY', None, 'GZIP'])
def test_roundtrip(tempdir, scheme, row_groups, comp):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'u64': np.arange(1000, dtype=np.uint64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['a'] = np.array([b'a', b'b', b'c', b'd', b'e']*200, dtype="S1")
data['aa'] = data['a'].map(lambda x: 2*x).astype("S2")
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data['cat'] = data.hello.astype('category')
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme, row_group_offsets=row_groups,
compression=comp)
r = ParquetFile(fname)
assert r.fmd.num_rows == r.count() == 1000
df = r.to_pandas()
assert data.cat.dtype == 'category'
for col in r.columns:
assert (df[col] == data[col]).all()
# tests https://github.com/dask/fastparquet/issues/250
assert isinstance(data[col][0], type(df[col][0]))
def test_bad_coltype(tempdir):
df = pd.DataFrame({'0': [1, 2], (0, 1): [3, 4]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises((ValueError, TypeError)) as e:
write(fn, df)
assert "tuple" in str(e.value)
def test_bad_col(tempdir):
df = pd.DataFrame({'x': [1, 2]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises(ValueError) as e:
write(fn, df, has_nulls=['y'])
@pytest.mark.parametrize('scheme', ('simple', 'hive'))
def test_roundtrip_complex(tempdir, scheme,):
import datetime
data = pd.DataFrame({'ui32': np.arange(1000, dtype=np.uint32),
'i16': np.arange(1000, dtype=np.int16),
'ui8': np.array([1, 2, 3, 4]*250, dtype=np.uint8),
'f16': np.arange(1000, dtype=np.float16),
'dicts': [{'oi': 'you'}] * 1000,
't': [datetime.datetime.now()] * 1000,
'td': [datetime.timedelta(seconds=1)] * 1000,
'bool': np.random.choice([True, False], size=1000)
})
data.loc[100, 't'] = None
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
@pytest.mark.parametrize('df', [
pd.util.testing.makeMixedDataFrame(),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/London')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/Berlin')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='UTC')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.min)}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.max)})
])
def test_datetime_roundtrip(tempdir, df, capsys):
fname = os.path.join(tempdir, 'test.parquet')
w = False
if 'x' in df and 'Europe/' in str(df.x.dtype.tz):
with pytest.warns(UserWarning) as w:
write(fname, df)
else:
write(fname, df)
r = ParquetFile(fname)
if w:
assert any("UTC" in str(wm.message) for wm in w.list)
df2 = r.to_pandas()
pd.testing.assert_frame_equal(df, df2, check_categorical=False)
def test_nulls_roundtrip(tempdir):
fname = os.path.join(tempdir, 'temp.parq')
data = pd.DataFrame({'o': np.random.choice(['hello', 'world', None],
size=1000)})
data['cat'] = data['o'].astype('category')
writer.write(fname, data, has_nulls=['o', 'cat'])
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
assert (data[col].isnull() == df[col].isnull()).all()
def test_decimal_roundtrip(tempdir):
import decimal
def decimal_convert(x):
return decimal.Decimal(x)
fname = os.path.join(tempdir, 'decitemp.parq')
data = pd.DataFrame({'f64': np.arange(10000000, 10001000, dtype=np.float64) / 100000,
'f16': np.arange(1000, dtype=np.float16) /10000
})
data['f64']=data['f64'].apply(decimal_convert)
data['f16']=data['f16'].apply(decimal_convert)
writer.write(fname, data)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (data[col] == df[col]).all()
def test_make_definitions_with_nulls():
for _ in range(10):
out = np.empty(1000, dtype=np.int32)
o = cencoding.NumpyIO(out.view("uint8"))
data = pd.Series(np.random.choice([True, None],
size=np.random.randint(1, 1000)))
defs, d2 = writer.make_definitions(data, False)
buf = np.frombuffer(defs, dtype=np.uint8)
i = cencoding.NumpyIO(buf)
cencoding.read_rle_bit_packed_hybrid(i, 1, length=0, o=o)
assert (out[:len(data)] == ~data.isnull()).sum()
def test_make_definitions_without_nulls():
for _ in range(100):
out = np.empty(10000, dtype=np.int32)
o = cencoding.NumpyIO(out.view("uint8"))
data = pd.Series([True] * np.random.randint(1, 10000))
defs, d2 = writer.make_definitions(data, True)
l = len(data) << 1
p = 1
while l > 127:
l >>= 7
p += 1
assert len(defs) == 4 + p + 1 # "length", num_count, value
i = cencoding.NumpyIO(np.frombuffer(defs, dtype=np.uint8))
cencoding.read_rle_bit_packed_hybrid(i, 1, length=0, o=o)
assert (out[:o.tell() // 4] == ~data.isnull()).sum()
# class mock:
# def is_required(self, *args):
# return False
# def max_definition_level(self, *args):
# return 1
# def __getattr__(self, item):
# return None
# halper, metadata = mock(), mock()
def test_empty_row_group(tempdir):
fname = os.path.join(tempdir, 'temp.parq')
data = pd.DataFrame({'o': np.random.choice(['hello', 'world'],
size=1000)})
writer.write(fname, data, row_group_offsets=[0, 900, 1800])
pf = ParquetFile(fname)
assert len(pf.row_groups) == 2
def test_int_rowgroups(tempdir):
df = pd.DataFrame({'a': [1]*100})
fname = os.path.join(tempdir, 'test.parq')
writer.write(fname, df, row_group_offsets=30)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [25, 25, 25, 25]
writer.write(fname, df, row_group_offsets=33)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [25, 25, 25, 25]
writer.write(fname, df, row_group_offsets=34)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [34, 34, 32]
writer.write(fname, df, row_group_offsets=35)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [34, 34, 32]
@pytest.mark.parametrize('scheme', ['hive', 'drill'])
def test_groups_roundtrip(tempdir, scheme):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme=scheme)
r = ParquetFile(tempdir)
assert r.columns == ['b']
out = r.to_pandas()
if scheme == 'drill':
assert set(r.cats) == {'dir0', 'dir1'}
assert set(out.columns) == {'b', 'dir0', 'dir1'}
out.rename(columns={'dir0': 'a', 'dir1': 'c'}, inplace=True)
for i, row in out.iterrows():
assert row.b in list(df[(df.a == row.a) & (df.c == row.c)].b)
writer.write(tempdir, df, row_group_offsets=[0, 50], partition_on=['a', 'c'],
file_scheme=scheme)
r = ParquetFile(tempdir)
assert r.fmd.num_rows == r.count() == sum(~df.a.isnull())
assert len(r.row_groups) == 8
out = r.to_pandas()
if scheme == 'drill':
assert set(out.columns) == {'b', 'dir0', 'dir1'}
out.rename(columns={'dir0': 'a', 'dir1': 'c'}, inplace=True)
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_groups_iterable(tempdir):
df = pd.DataFrame({'a': np.random.choice(['aaa', 'bbb', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a'], file_scheme='hive')
r = ParquetFile(tempdir)
assert r.columns == ['b', 'c']
out = r.to_pandas()
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_empty_groupby(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
df.loc[499:, 'c'] = True # no False in second half
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme='hive',
row_group_offsets=[0, 500])
r = ParquetFile(tempdir)
assert r.count() == sum(~df.a.isnull())
assert len(r.row_groups) == 6
out = r.to_pandas()
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_too_many_partition_columns(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', 'c'], size=1000),
'c': np.random.choice([True, False], size=1000)})
with pytest.raises(ValueError) as ve:
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme='hive')
assert "Cannot include all columns" in str(ve.value)
def test_read_partitioned_and_write_with_empty_partions(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', 'c'], size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a'], file_scheme='hive')
df_filtered = ParquetFile(tempdir).to_pandas(
filters=[('a', '==', 'b')]
)
writer.write(tempdir, df_filtered, partition_on=['a'], file_scheme='hive')
df_loaded = ParquetFile(tempdir).to_pandas()
tm.assert_frame_equal(df_filtered, df_loaded, check_categorical=False)
@pytest.mark.parametrize('compression', ['GZIP',
'gzip',
None,
{'x': 'GZIP'},
{'y': 'gzip', 'x': None}])
def test_write_compression_dict(tempdir, compression):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]})
fn = os.path.join(tempdir, 'tmp.parq')
writer.write(fn, df, compression=compression)
r = ParquetFile(fn)
df2 = r.to_pandas()
tm.assert_frame_equal(df, df2, check_categorical=False, check_dtype=False)
def test_write_compression_schema(tempdir):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]})
fn = os.path.join(tempdir, 'tmp.parq')
writer.write(fn, df, compression={'x': 'gzip'})
r = ParquetFile(fn)
assert all(c.meta_data.codec for row in r.row_groups
for c in row.columns
if c.meta_data.path_in_schema == ['x'])
assert not any(c.meta_data.codec for row in r.row_groups
for c in row.columns
if c.meta_data.path_in_schema == ['y'])
def test_index(tempdir):
import json
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]},
index=pd.Index([10, 20, 30], name='z'))
writer.write(fn, df)
pf = ParquetFile(fn)
assert set(pf.columns) == {'x', 'y', 'z'}
meta = json.loads(pf.key_value_metadata[b'pandas'])
assert meta['index_columns'] == ['z']
out = pf.to_pandas()
assert out.index.name == 'z'
pd.testing.assert_frame_equal(df, out, check_dtype=False)
out = pf.to_pandas(index=False)
assert out.index.name is None
assert (out.index == range(3)).all()
assert (out.z == df.index).all()
def test_duplicate_columns(tempdir):
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('aaa'))
with pytest.raises(ValueError) as e:
write(fn, df)
assert 'duplicate' in str(e.value)
@pytest.mark.parametrize('cmp', [None, 'gzip'])
def test_cmd_bytesize(tempdir, cmp):
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame({'s': ['a', 'b']}, dtype='category')
write(fn, df, compression=cmp)
pf = ParquetFile(fn)
chunk = pf.row_groups[0].columns[0]
cmd = chunk.meta_data
csize = cmd.total_compressed_size
f = cencoding.NumpyIO(open(fn, 'rb').read())
f.seek(cmd.dictionary_page_offset)
ph = cencoding.from_buffer(f, name="PageHeader")
c1 = ph.compressed_page_size
f.seek(c1, 1)
ph = cencoding.from_buffer(f, "PageHeader")
c2 = ph.compressed_page_size
f.seek(c2, 1)
assert csize == f.tell() - cmd.dictionary_page_offset
def test_dotted_column(tempdir):
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame({'x.y': [1, 2, 3],
'y': [1., 2., 3.]})
writer.write(fn, df)
out = ParquetFile(fn).to_pandas()
assert list(out.columns) == ['x.y', 'y']
def test_naive_index(tempdir):
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]})
writer.write(fn, df)
r = ParquetFile(fn)
assert set(r.columns) == {'x', 'y'}
writer.write(fn, df, write_index=True)
r = ParquetFile(fn)
assert set(r.columns) == {'x', 'y', 'index'}
def test_text_convert(tempdir):
df = pd.DataFrame({'a': [u'π'] * 100,
'b': [b'a'] * 100})
fn = os.path.join(tempdir, 'tmp.parq')
write(fn, df, fixed_text={'a': 2, 'b': 1})
pf = ParquetFile(fn)
assert pf._schema[1].type == parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY
assert pf._schema[1].type_length == 2
assert pf._schema[2].type == parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY
assert pf._schema[2].type_length == 1
assert pf.statistics['max']['a'] == [u'π']
df2 = pf.to_pandas()
tm.assert_frame_equal(df, df2, check_categorical=False)
write(fn, df)
pf = ParquetFile(fn)
assert pf._schema[1].type == parquet_thrift.Type.BYTE_ARRAY
assert pf._schema[2].type == parquet_thrift.Type.BYTE_ARRAY
assert pf.statistics['max']['a'] == [u'π']
df2 = pf.to_pandas()
tm.assert_frame_equal(df, df2, check_categorical=False)
write(fn, df, fixed_text={'a': 2})
pf = ParquetFile(fn)
assert pf._schema[1].type == parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY
assert pf._schema[2].type == parquet_thrift.Type.BYTE_ARRAY
assert pf.statistics['max']['a'] == [u'π']
df2 = pf.to_pandas()
tm.assert_frame_equal(df, df2, check_categorical=False)
def test_null_time(tempdir):
"""Test reading a file that contains null records."""
tmp = str(tempdir)
expected = pd.DataFrame({"t": [np.timedelta64(), np.timedelta64('NaT')]})
fn = os.path.join(tmp, "test-time-null.parquet")
# with NaT
write(fn, expected, has_nulls=False)
p = ParquetFile(fn)
data = p.to_pandas()
assert (data['t'] == expected['t'])[~expected['t'].isnull()].all()
assert sum(data['t'].isnull()) == sum(expected['t'].isnull())
# with NULL
write(fn, expected, has_nulls=True)
p = ParquetFile(fn)
data = p.to_pandas()
assert (data['t'] == expected['t'])[~expected['t'].isnull()].all()
assert sum(data['t'].isnull()) == sum(expected['t'].isnull())
@pytest.mark.parametrize(
"pnull", [True, False]
)
def test_auto_null_object(tempdir, pnull):
tmp = str(tempdir)
df = pd.DataFrame({'a': [1, 2, 3, 0],
'aa': pd.Series([1, 2, 3, None], dtype=object),
'b': [1., 2., 3., np.nan],
'c': pd.to_timedelta([1, 2, 3, np.nan], unit='ms'),
'd': ['a', 'b', 'c', None],
'f': [True, False, True, True],
'ff': [True, False, None, True]}) # object
df['e'] = df['d'].astype('category')
df['bb'] = df['b'].astype('object')
df['aaa'] = df['a'].astype('object')
object_cols = ['d', 'ff', 'bb', 'aaa', 'aa']
test_cols = list(set(df) - set(object_cols)) + ['d']
fn = os.path.join(tmp, "test.parq")
with pytest.raises(ValueError):
write(fn, df, has_nulls=False)
write(fn, df, has_nulls=True)
pf = ParquetFile(fn, pandas_nulls=pnull)
for col in pf._schema[1:]:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.OPTIONAL
df2 = pf.to_pandas(categories=['e'])
tm.assert_frame_equal(df[test_cols], df2[test_cols], check_categorical=False,
check_dtype=False)
tm.assert_frame_equal(df[['bb']].astype('float64'), df2[['bb']])
tm.assert_frame_equal(df[['aaa']].astype('int64'), df2[['aaa']])
if pnull:
tm.assert_frame_equal(df[['aa']].astype('Int64'), df2[['aa']])
tm.assert_frame_equal(df[['ff']].astype("boolean"), df2[['ff']])
else:
tm.assert_frame_equal(df[['aa']].astype('float'), df2[['aa']])
tm.assert_frame_equal(df[['ff']].astype("float"), df2[['ff']])
# not giving any value same as has_nulls=True
write(fn, df)
pf = ParquetFile(fn)
for col in pf._schema[1:]:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.OPTIONAL
df2 = pf.to_pandas(categories=['e'])
tm.assert_frame_equal(df[test_cols], df2[test_cols], check_categorical=False,
check_dtype=False)
tm.assert_frame_equal(df[['ff']].astype('boolean'), df2[['ff']])
tm.assert_frame_equal(df[['bb']].astype('float64'), df2[['bb']])
tm.assert_frame_equal(df[['aaa']].astype('int64'), df2[['aaa']])
# 'infer' is new recommended auto-null
write(fn, df, has_nulls='infer')
pf = ParquetFile(fn)
for col in pf._schema[1:]:
if col.name in object_cols:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.OPTIONAL
else:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED
df2 = pf.to_pandas()
tm.assert_frame_equal(df[test_cols], df2[test_cols], check_categorical=False)
tm.assert_frame_equal(df[['ff']].astype('boolean'), df2[['ff']])
tm.assert_frame_equal(df[['bb']].astype('float64'), df2[['bb']])
tm.assert_frame_equal(df[['aaa']].astype('int64'), df2[['aaa']])
    # but legacy None still works
write(fn, df, has_nulls=None)
pf = ParquetFile(fn)
for col in pf._schema[1:]:
if col.name in object_cols:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.OPTIONAL
else:
assert col.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED
df2 = pf.to_pandas()
tm.assert_frame_equal(df[test_cols], df2[test_cols], check_categorical=False)
tm.assert_frame_equal(df[['ff']].astype('boolean'), df2[['ff']])
tm.assert_frame_equal(df[['bb']].astype('float64'), df2[['bb']])
tm.assert_frame_equal(df[['aaa']].astype('int64'), df2[['aaa']])
@pytest.mark.parametrize('n', (10, 127, 2**8 + 1, 2**16 + 1))
def test_many_categories(tempdir, n):
tmp = str(tempdir)
cats = np.arange(n)
codes = np.random.randint(0, n, size=1000000)
df = pd.DataFrame({'x': pd.Categorical.from_codes(codes, cats), 'y': 1})
fn = os.path.join(tmp, "test.parq")
write(fn, df, has_nulls=False)
pf = ParquetFile(fn)
out = pf.to_pandas(categories={'x': n})
tm.assert_frame_equal(df, out, check_categorical=False, check_dtype=False)
df.set_index('x', inplace=True)
write(fn, df, has_nulls=False, write_index=True)
pf = ParquetFile(fn)
out = pf.to_pandas(categories={'x': n}, index='x')
assert (out.index == df.index).all()
assert (out.y == df.y).all()
def test_write_partitioned_with_empty_categories(tempdir):
df = pd.DataFrame({
'b': np.random.random(size=1000),
'a': pd.Series(np.random.choice(['x', 'z'], size=1000)).astype(
CategoricalDtype(categories=['x', 'y', 'z'])
),
})
write(tempdir, df, partition_on=['a'], file_scheme='hive', write_index=True)
out = ParquetFile(tempdir).to_pandas()
assert_frame_equal(out, df, check_like=True, check_categorical=False, check_names=False)
def test_autocat(tempdir):
tmp = str(tempdir)
fn = os.path.join(tmp, "test.parq")
data = pd.DataFrame({'o': pd.Categorical(
np.random.choice(['hello', 'world'], size=1000))})
write(fn, data)
pf = ParquetFile(fn)
assert 'o' in pf.categories
assert pf.categories['o'] == 2
assert str(pf.dtypes['o']) == 'category'
out = pf.to_pandas()
assert out.dtypes['o'] == 'category'
out = pf.to_pandas(categories={})
assert str(out.dtypes['o']) != 'category'
out = pf.to_pandas(categories=['o'])
assert out.dtypes['o'].kind == 'O'
out = pf.to_pandas(categories={'o': 2})
assert out.dtypes['o'].kind == 'O'
@pytest.mark.parametrize('row_groups', ([0], [0, 2]))
@pytest.mark.parametrize('dirs', (['', ''], ['cat=1', 'cat=2']))
def test_merge(tempdir, dirs, row_groups):
fn = str(tempdir)
default_mkdirs(os.path.join(fn, dirs[0]))
df0 = pd.DataFrame({'a': [1, 2, 3, 4]})
fn0 = os.sep.join([fn, dirs[0], 'out0.parq'])
write(fn0, df0, row_group_offsets=row_groups)
default_mkdirs(os.path.join(fn, dirs[1]))
df1 = pd.DataFrame({'a': [5, 6, 7, 8]})
fn1 = os.sep.join([fn, dirs[1], 'out1.parq'])
write(fn1, df1, row_group_offsets=row_groups)
# with file-names
pf = writer.merge([fn0, fn1])
assert len(pf.row_groups) == 2 * len(row_groups)
out = pf.to_pandas().a.tolist()
assert out == [1, 2, 3, 4, 5, 6, 7, 8]
if "cat=1" in dirs:
assert 'cat' in pf.cats
# with instances
pf = writer.merge([ParquetFile(fn0), ParquetFile(fn1)])
assert len(pf.row_groups) == 2 * len(row_groups)
out = pf.to_pandas().a.tolist()
assert out == [1, 2, 3, 4, 5, 6, 7, 8]
if "cat=1" in dirs:
assert 'cat' in pf.cats
def test_merge_s3(tempdir, s3):
fn = str(tempdir)
df0 = pd.DataFrame({'a': [1, 2, 3, 4]})
fn0 = TEST_DATA + '/out0.parq'
write(fn0, df0, open_with=s3.open)
df1 = pd.DataFrame({'a': [5, 6, 7, 8]})
fn1 = TEST_DATA + '/out1.parq'
write(fn1, df1, open_with=s3.open)
# with file-names
pf = writer.merge([fn0, fn1], open_with=s3.open)
assert len(pf.row_groups) == 2
out = pf.to_pandas().a.tolist()
assert out == [1, 2, 3, 4, 5, 6, 7, 8]
def test_merge_fail(tempdir):
fn = str(tempdir)
df0 = pd.DataFrame({'a': [1, 2, 3, 4]})
fn0 = os.sep.join([fn, 'out0.parq'])
write(fn0, df0)
df1 = pd.DataFrame({'a': ['a', 'b', 'c']})
fn1 = os.sep.join([fn, 'out1.parq'])
write(fn1, df1)
with pytest.raises(ValueError) as e:
writer.merge([fn0, fn1])
assert 'schemas' in str(e.value)
def test_append_simple(tempdir):
fn = os.path.join(str(tempdir), 'test.parq')
df = pd.DataFrame({'a': [1, 2, 3, 0],
'b': ['a', 'a', 'b', 'b']})
write(fn, df, write_index=False)
write(fn, df, append=True, write_index=False)
pf = ParquetFile(fn)
expected = | pd.concat([df, df], ignore_index=True) | pandas.concat |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = pd.Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
from django.shortcuts import render
from plotly.offline import plot
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import os
def home(chart):
return render(chart, "index.html")
def engage(chart):
directory = os.getcwd() + "/simulation/engage_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
eng_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/engage_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
eng_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/engage_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
eng_own = plot(fig, output_type='div')
return render(chart, "engage.html", context={'eng_sector': eng_sector, 'eng_loc': eng_loc, 'eng_own': eng_own})
def qualify(chart):
directory = os.getcwd() + "/simulation/qualify_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
qual_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/qualify_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
qual_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/qualify_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
qual_own = plot(fig, output_type='div')
return render(chart, "qualify.html", context={'qual_sector': qual_sector, 'qual_loc': qual_loc, 'qual_own': qual_own})
def design(chart):
directory = os.getcwd() + "/simulation/design_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
des_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/design_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
des_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/design_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
des_own = plot(fig, output_type='div')
return render(chart, "design.html", context={'des_sector': des_sector, 'des_loc': des_loc, 'des_own': des_own})
def propose(chart):
directory = os.getcwd() + "/simulation/propose_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
prop_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/propose_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
prop_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/propose_owner.xlsx"
data = pd.read_excel(directory)
# -*- encoding: utf-8 -*-
import pandas as pd # main data format
import networkx as nx # main data format
import numpy as np # log10
from scipy import stats as st # pearsonr and spearmanr
import copy # copy and deepcopy
def htbreak(adic, g=4):
alist = [ v for k,v in adic.items() ]
temp = copy.copy(alist)
breaks = []
for i in range(g-1):
avg = sum(temp) / float(len(temp))
breaks.append(avg)
temp2 = [ v for v in temp if v>avg ]
temp = temp2
#alist2 = []
adic2 = {}
for k,v in adic.items():
lvl = None
for i in range(len(breaks)):
if v<=breaks[i]:
lvl = i
break
if lvl is None:
lvl = len(breaks)
#alist2.append(lvl)
adic2[k] = lvl
#print(alist2)
return adic2, breaks
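# Illustrative note (not in the original source): htbreak is a head/tail-breaks style
# classifier. Each break is the mean of the values still above the previous break, and
# every key is assigned the index of the first break it does not exceed.
#
#   levels, breaks = htbreak({'a': 1, 'b': 2, 'c': 3, 'd': 100}, g=3)
#   # breaks -> [26.5, 100.0]
#   # levels -> {'a': 0, 'b': 0, 'c': 0, 'd': 1}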
def calculate_IOratio(g, exclude_selfloop=True):
g2 = copy.deepcopy(g)
if exclude_selfloop:
g2.remove_edges_from(g2.selfloop_edges())
indeg = g2.in_degree(weight='weight')
oudeg = g2.out_degree(weight='weight')
ioRatio = {}
for n in g2.nodes():
if oudeg[n]>0:
ioRatio[n] = np.log10(indeg[n]/oudeg[n])
else:
ioRatio[n] = 0
return ioRatio
def calculate_metrices(g, return_dic=True, d=0.95, number_of_loops=1000, weighted=True):
if weighted:
pagerank = nx.pagerank(g, alpha=d, max_iter=number_of_loops)
hub_rank, authority_rank = nx.hits(g, max_iter=number_of_loops)
else:
g2 = copy.deepcopy(g)
for n1,n2,wd in g2.edges(data=True):
g2[n1][n2]['weight'] = float(1.0)
pagerank = nx.pagerank(g2, alpha=d, max_iter=number_of_loops)
hub_rank, authority_rank = nx.hits(g2, max_iter=number_of_loops)
metrices = [pagerank, hub_rank, authority_rank]
metrices_names = ['pagerank', 'hub_rank', 'authority_rank']
"""
cal_res = { n:{} for n in g.nodes() }
for dic, name in zip(metrices, metrices_names):
for n,v in dic.items():
cal_res[n].update({name: v})
"""
#cal_res = { name:res for name,res in zip(metrices_names, metrices) }
cal_res = list( zip(metrices_names, metrices) )
if return_dic:
return cal_res
else:
cal_res2 = { a:b for a,b in cal_res }
df_res = pd.DataFrame.from_dict(cal_res2)
import pandas
s = pandas.Series([51, 27, "galleta", 48.1231, 15])
print(s)
ns = pandas.to_numeric(s, errors="coerce")
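# Illustrative continuation (not part of the original snippet): with errors="coerce",
# non-numeric entries such as "galleta" are converted to NaN instead of raising,
# so they can be located or dropped afterwards.
print(ns.isna())    # True only at the position of "galleta"
print(ns.dropna())  # numeric values only: 51, 27, 48.1231, 15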
import dask.dataframe as dd
import deimos
from functools import partial
import multiprocessing as mp
import numpy as np
import pandas as pd
def threshold(features, by='intensity', threshold=0):
'''
Thresholds input :obj:`~pandas.DataFrame` using `by` keyword, greater than
value passed to `threshold`.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str
Variable to threshold by.
threshold : float
Threshold value.
Returns
-------
:obj:`~pandas.DataFrame`
Thresholded feature coordinates.
'''
return features.loc[features[by] > threshold, :].reset_index(drop=True)
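# Example (illustrative, not in the original module): keep only features whose intensity
# exceeds 1000. The column names here are assumptions for the sketch.
#
#   peaks = pd.DataFrame({'mz': [100.1, 200.2], 'intensity': [500, 1500]})
#   threshold(peaks, by='intensity', threshold=1000)
#   #       mz  intensity
#   # 0  200.2       1500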
def collapse(features, keep=['mz', 'drift_time', 'retention_time'], how=np.sum):
'''
Collapses input data such that only specified dimensions remain, according
to the supplied aggregation function.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
keep : str or list
Dimensions to keep during collapse operation.
how : function or str
Aggregation function for collapse operation.
Returns
-------
:obj:`~pandas.DataFrame`
Collapsed feature coordinates and aggregated
intensities.
'''
return features.groupby(by=keep,
as_index=False,
sort=False).agg({'intensity': how})
def locate(features, by=['mz', 'drift_time', 'retention_time'],
loc=[0, 0, 0], tol=[0, 0, 0], return_index=False):
'''
Given a coordinate and tolerances, return a subset of the
data.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str or list
Dimension(s) by which to subset the data.
loc : float or list
Coordinate location.
tol : float or list
Tolerance in each dimension.
return_index : bool
Return boolean index of subset if True.
Returns
-------
:obj:`~pandas.DataFrame`
Subset of feature coordinates and intensities.
:obj:`~numpy.array`
If `return_index` is True, boolean index of subset elements,
i.e. `features[index] = subset`.
Raises
------
ValueError
If `by`, `loc`, and `tol` are not the same length.
'''
# safely cast to list
by = deimos.utils.safelist(by)
loc = deimos.utils.safelist(loc)
tol = deimos.utils.safelist(tol)
# check dims
deimos.utils.check_length([by, loc, tol])
if features is None:
if return_index is True:
return None, None
else:
return None
# store index
rindex = features.index.values
# extend columns
cols = features.columns
cidx = [cols.get_loc(x) for x in by]
# subset by each dim
features = features.values
idx = np.full(features.shape[0], True, dtype=bool)
for i, x, dx in zip(cidx, loc, tol):
idx *= (features[:, i] <= x + dx) & (features[:, i] >= x - dx)
features = features[idx]
rindex = rindex[idx]
if return_index is True:
# data found
if features.shape[0] > 0:
return pd.DataFrame(features, index=rindex, columns=cols)
# Code to load graph data as networkx graph and dump it as pickle file
# Author: <NAME> 2021-12-20
import networkx as nx # https://networkx.org/documentation/stable/tutorial.html
import csv
import argparse
import stellargraph as sg
import pickle
from stellargraph.data import EdgeSplitter
import pandas as pd
def load_graph(edges):
G = nx.MultiDiGraph()
# add the nodes
V = add_graph_data(G, edges)
return G, V
def add_graph_data(G, edges):
V = {}
count = 1
with open(edges, 'r') as csvfile:
datareader = csv.reader(csvfile)
for e in datareader:
u = int(e[0])
v = int(e[1])
if u not in V:
V[u] = count
count += 1
if v not in V:
V[v] = count
count += 1
G.add_node(V[u])
G.add_node(V[v])
G.add_edge(V[u], V[v])
return V
def save_labels_as_csv(labels_path, out_path, V):
node_classes = {}
node_labels = []
class_num = 0
with open(labels_path, 'r') as csvfile:
datareader = csv.reader(csvfile)
for row in datareader:
node = int(row[0])
label = row[1]
if label in node_classes:
node_labels.append([V[node], node_classes[label]])
else:
node_classes[label] = class_num
class_num += 1
node_labels.append([V[node], node_classes[label]])
# write the labels to file
df = pd.DataFrame(node_labels)
df.to_csv(out_path + 'group-edges.csv', header=None, index=False)
classes_list = list(node_classes.items())
df_c = pd.DataFrame(classes_list)
import io
import pandas as pd
import pytest
from pyodc import codec
from pyodc.codec import select_codec
from pyodc.stream import LittleEndianStream
def _check_encode(codec, series, encode_compare):
f = io.BytesIO()
st = LittleEndianStream(f)
for v in series:
codec.encode(st, v)
f.seek(0)
assert f.read() == encode_compare
def test_int8_range_encoding():
# Also test with negative numbers!
for offset in (0, -100):
s = pd.Series((1 + offset, 2 ** 8 + offset))
c = select_codec("column", s, None)
assert type(c) == codec.Int8
assert c.min == 1 + offset
_check_encode(c, s, b"\x00\xff")
def test_int16_range_encoding_minimal():
"""
A span of integers that _just_ requires int16
"""
# Also test with negative numbers!
for offset in (0, -10000):
s = pd.Series((1 + offset, 2 ** 8 + offset + 1))
c = select_codec("column", s, None)
assert type(c) == codec.Int16
assert c.min == 1 + offset
_check_encode(c, s, b"\x00\x00\x00\x01")
def test_int16_range_encoding_maximal():
# Also test with negative numbers!
for offset in (0, -10000):
s = pd.Series((1 + offset, 2 ** 8 + offset, 2 ** 16 + offset))
c = select_codec("column", s, None)
assert type(c) == codec.Int16
assert c.min == 1 + offset
_check_encode(c, s, b"\x00\x00\xff\x00\xff\xff")
def test_int32_range_encoding():
"""
n.b. the Int32 codec is a bit crappy. It does _not_ include an offset value
--> It only encodes the legit values of a SIGNED 32bit integer
--> 64bit integers are todo (but break some fortran compatibility, as not all
64bit integers can be represented as doubles).
--> Can include missing values
"""
s = pd.Series((-(2 ** 31), None, 2 ** 31 - 2))
c = select_codec("column", s, None)
assert isinstance(c, codec.Int32)
assert c.min == -(2 ** 31)
_check_encode(c, s, b"\x00\x00\x00\x80\xff\xff\xff\x7f\xfe\xff\xff\x7f")
def test_wider_range_unsupported():
s = pd.Series((-(2 ** 31), 2 ** 31 - 1))
with pytest.raises(NotImplementedError):
select_codec("column", s, None)
def test_int8_missing_range_encoding():
# Also test with negative numbers!
for offset in (0, -100):
s = pd.Series((1 + offset, None, 2 ** 8 + offset - 1))
c = select_codec("column", s, None)
assert type(c) == codec.Int8Missing
assert c.min == 1 + offset
_check_encode(c, s, b"\x00\xff\xfe")
def test_int16_missing_range_encoding_minimal():
# Also test with negative numbers!
for offset in (0, -100):
s = pd.Series((1 + offset, None, 2 ** 8 + offset))
import requests
import pandas as pd
import datetime
import talib
def download_daily_data(ticker):
return download_data(datatype='K_DAY', ticker=ticker)
def download_multiple_daily(tickers):
dfs = list()
for tk in tickers:
dfs.append(download_daily_data(tk))
return pd.concat(dfs, axis=0, keys=tickers)
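# Hypothetical usage sketch (not in the original file): the keys= argument gives the
# concatenated result a (ticker, date) MultiIndex, so one ticker's rows can be pulled
# back out with .loc. Ticker codes below are placeholders.
#
#   combined = download_multiple_daily(['AAA', 'BBB'])
#   combined.loc['AAA']   # daily bars for 'AAA' only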
import argparse
import os
import logging
from netCDF4 import Dataset
import numpy as np
import pandas as pd
def nc2csv_obs_and_M(src_file_path, dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(37):
id = str(date[i])[:8] + '_' + '{:02d}'.format(j)
id_list.append(id)
ID = pd.Series(data=id_list, name='Time')
for i in range(len(stations)):
csv = pd.concat([ID], axis=1)
for var in ['t2m_obs', 'rh2m_obs', 'w10m_obs', 't2m_M', 'rh2m_M', 'w10m_M']:
var_arr = np.array(nc.variables[var][:])
var_arr = np.squeeze(var_arr[:, :, i].reshape(-1, 1))
var_arr[var_arr < -8000] = np.NaN
csv[var] = var_arr
csv.to_csv(os.path.join(dst_dir,str(stations[i]) + '.csv'), index=False)
print(stations[i],' done!')
def nc2csv_merge_pre_and_next(src_file_path,str_lastday,dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(24):
id = str(date[i])[:8] + ' ' + '{:02d}'.format(j)
id_list.append(id)
for j in range(13):
id = str_lastday + ' ' + '{:02d}'.format(j)
id_list.append(id)
Time = pd.to_datetime(id_list)
ID = pd.Series(data=Time, name='Time')
for i in range(len(stations)):
csv = pd.concat([ID], axis=1)
for var in nc.variables:
if var.endswith('obs'):
var_arr = np.array(nc.variables[var][:])
elif var.endswith('M'):
var_arr = np.array(nc.variables[var][:])
for j in range(1, var_arr.shape[0]):
for k in range(13):
pre = var_arr[j - 1, 24 + k, i]
current = var_arr[j, k, i]
if current == -9999:
var_arr[j, k, i] = pre
elif current != -9999 and pre != -9999:
var_arr[j, k, i] = (pre + current) / 2
else:
continue
var_arr_first = np.squeeze(var_arr[:, :24, i].reshape(-1, 1))
var_arr_last = np.squeeze((var_arr[-1, 24:, i]).reshape(-1, 1))
var_arr = np.r_[var_arr_first, var_arr_last]
var_arr[var_arr < -8000] = np.NaN
csv[var] = var_arr
csv.to_csv(os.path.join(dst_dir,str(stations[i]) + '.csv'), index=False)
print(stations[i],' done!')
def nc2csv_remain_all_info(src_file_path,str_lastday,dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(24):
id = str(date[i])[:8] + ' ' + '{:02d}'.format(j)
id_list.append(id)
for j in range(13):
id = str_lastday + ' ' + '{:02d}'.format(j)
id_list.append(id)
Time = pd.to_datetime(id_list)
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user import get_user_function
def get_line_chart(function=None):
positions = get_positions(check_submitter=False)
budget = get_budget()
columns = [row.keys() for row in positions]
positions = pd.DataFrame(positions, columns=columns[0])
budget = pd.DataFrame(budget, columns=columns[0])
if function:
if function != 'All':
positions = positions.loc[positions['function'] == function]
budget = budget.loc[budget['function'] == function]
if g.user['type'] != 'ADMIN' and function == 'All':
functions = get_user_function(g.user['id'])
function_names = [get_function(function['function_id'])['name'] for function in functions]
positions = positions.loc[positions['function'].isin(function_names)]
budget = budget.loc[budget['function'].isin(function_names)]
positions['FTE'] = pd.to_numeric(positions['hours'], errors='coerce') / 40
budget['FTE'] = pd.to_numeric(budget['hours'], errors='coerce') / 40
positions['salary'] = pd.to_numeric(positions['salary'], errors='coerce')
positions['fringe_benefit'] = pd.to_numeric(positions['fringe_benefit'], errors='coerce')
positions['social_security_contribution'] = pd.to_numeric(positions['social_security_contribution'], errors='coerce')
budget['salary'] = pd.to_numeric(budget['salary'], errors='coerce')
budget['fringe_benefit'] = pd.to_numeric(budget['fringe_benefit'], errors='coerce')
budget['social_security_contribution'] = pd.to_numeric(budget['social_security_contribution'], errors='coerce')
import numpy as np
import pandas as pd
class Stats(object):
#init to 0 for all attributes
#TODO: implement magnitude fields and methods
accX = pd.DataFrame()
accY = pd.DataFrame()
accZ = pd.DataFrame()
# Importing necessary packages
import pandas as pd
import numpy as np
import datetime
import geocoder
from geopy.geocoders import Nominatim
from darksky.api import DarkSky, DarkSkyAsync
from darksky.types import languages, units, weather
# Reading monthly yellow taxi trip data for 2019
df1 = pd.read_csv("yellow_tripdata_2019-01.csv", low_memory = False)
df2 = pd.read_csv("yellow_tripdata_2019-02.csv", low_memory = False)
df3 = pd.read_csv("yellow_tripdata_2019-03.csv", low_memory = False)
df4 = pd.read_csv("yellow_tripdata_2019-04.csv", low_memory = False)
df5 = pd.read_csv("yellow_tripdata_2019-05.csv", low_memory = False)
df6 = pd.read_csv("yellow_tripdata_2019-06.csv", low_memory = False)
df7 = pd.read_csv("yellow_tripdata_2019-07.csv", low_memory = False)
df8 = pd.read_csv("yellow_tripdata_2019-08.csv", low_memory = False)
df9 = pd.read_csv("yellow_tripdata_2019-09.csv", low_memory = False)
df10 = pd.read_csv("yellow_tripdata_2019-10.csv", low_memory = False)
df11 = pd.read_csv("yellow_tripdata_2019-11.csv", low_memory = False)
df12 = pd.read_csv("yellow_tripdata_2019-12.csv", low_memory = False)
# Dropping rows with N/A's in each month
df1.dropna(inplace = True)
df2.dropna(inplace = True)
df3.dropna(inplace = True)
df4.dropna(inplace = True)
df5.dropna(inplace = True)
df6.dropna(inplace = True)
df7.dropna(inplace = True)
df8.dropna(inplace = True)
df9.dropna(inplace = True)
df10.dropna(inplace = True)
df11.dropna(inplace = True)
df12.dropna(inplace = True)
# Concatenating monthly data and removing unnecessary columns
data = pd.concat([df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11, df12])
data.drop(['VendorID', 'store_and_fwd_flag', 'congestion_surcharge', 'RatecodeID', 'payment_type', 'fare_amount', 'extra', 'mta_tax', 'tip_amount', 'tolls_amount', 'improvement_surcharge', 'congestion_surcharge'], axis = 1, inplace = True)
# Removing insensible data
data = data.loc[(data.passenger_count > 0),:] # Passenger count cannot be less than or equal zero
data = data.loc[(data.trip_distance > 0),:] # Trip Distance should be greater than zero
data = data.loc[(data.total_amount > 2.5),:] # Minimum Yellow Taxi fare in NYC is $2.50
data = data[data.total_amount < 75] # Removing outliers
data = data[data.trip_distance < 40] # Removing outliers
# Removing entries other than 2019
data['Pyear'] = pd.DatetimeIndex(data['tpep_pickup_datetime']).year
data['Dyear'] = pd.DatetimeIndex(data['tpep_dropoff_datetime']).year
data = data.loc[(data.Pyear == 2019),:]
data = data.loc[(data.Dyear == 2019),:]
# Calculating Duration from Pickup and Dropoff Timestamp
data['tpep_pickup_datetime'] = pd.to_datetime(data.tpep_pickup_datetime)
data['tpep_dropoff_datetime'] = pd.to_datetime(data.tpep_dropoff_datetime)
data['Duration'] = data['tpep_dropoff_datetime'] - data['tpep_pickup_datetime']
data['Duration'] = data.Duration.dt.total_seconds()/60
data['Duration'] = round(data['Duration'])
data = data[data.Duration > 0]
data = data[data.Duration < 180]
# Feature Extraction
data['Month'] = pd.DatetimeIndex(data['tpep_pickup_datetime']).month
data['Day'] = pd.DatetimeIndex(data['tpep_pickup_datetime']).day_name()
data['Hour'] = pd.DatetimeIndex(data['tpep_pickup_datetime']).hour
import os,sys
from PyQt5.QtWidgets import QMainWindow, QAction, QMenu, QApplication
sys.path.append(r'/home/remy/Calypso/Projects/004_Toto/Toto')
from totoview.dialog.message import wrapper_plugins
import toto
import pandas as pd
import numpy as np
app = QApplication(sys.argv)
dates = pd.date_range('1/1/2000', periods=360)
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 22:55:37 2020
@author: <NAME> <EMAIL>
Data and Model from:
A conceptual model for the coronavirus disease 2019 (COVID-19)
outbreak in Wuhan, China with individual reaction and
governmental action
DOI:https://doi.org/10.1016/j.ijid.2020.02.058
https://www.ijidonline.com/article/S1201-9712(20)30117-X/fulltext
"""
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
import math
import pandas as pd
import os
import time
start = time.time() #Real time when the program starts to run
clear = lambda: os.system('cls')
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
path_fol = "{}\SEIR Model for Spread of Disease".format(dir_path)
try:
os.mkdir(path_fol)
except:
pass
def R0(α, β, μ, γ):
# R_0 = (α/(μ + α))*(β/(μ + λ))
R_0 = (β/γ)*(α/(α + μ))
return R_0
def R0b(β, γ, σ, μ):
return (β*σ)/((γ + μ)*(μ + σ))
def alpha(day):
if int(day) <= 23:
return 0
elif int(day) > 23 and int(day) <= 29:
return 0.4239
else:
return 0.8478
def Beta(α,β0, D, N, k):
B = β0*(1 - α)*((1 - D/N)**k)
return B
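# Clarifying note (added): following the model in the paper cited in the header, the effective
# transmission rate is beta(t) = beta0 * (1 - alpha) * (1 - D/N)**k, where alpha models the
# strength of governmental action, k the intensity of individual reaction, and D the public
# perception of risk that builds up from severe cases.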
def SEIR(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
dsdt = Λ - μ*y[0] - ((β*F*y[0])/y[4]) - (β_t/y[4])*y[2]*y[0]
dedt = ((β*F*y[0])/y[4]) + (β_t/y[4])*y[2]*y[0] - (μ + σ)*y[1]
didt = σ*y[1] - (μ + γ)*y[2]
drdt = γ*y[2] - μ*y[3]
dndt = -μ*y[4]
dDdt = d*γ*y[2] - λ*y[5]
dcdt = σ*y[1]
return [dsdt, dedt, didt, drdt, dndt, dDdt, dcdt]
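# Clarifying note (added): the state vector is ordered y = [S, E, I, R, N, D, C]; N is the
# total population (decaying with mu), D appears to be the perception-of-risk variable that
# feeds back into Beta(), and C accumulates cases as the integral of sigma*E.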
def jacobian(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
return [[-F*β/y[4]- y[2]*β_t/y[4]- μ, 0, -y[0]*β_t/y[4], 0, F*y[0]*β/y[4]**2 + y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ F*β/y[4]+ y[2]*β_t/y[4], -μ - σ, y[0]*β_t/y[4], 0, -F*y[0]*β/y[4]**2 - y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ 0, σ, -γ - μ, 0, 0, 0, 0],
[ 0, 0, γ, -μ, 0, 0, 0],
[ 0, 0, 0, 0, -μ, 0, 0],
[ 0, 0, d*γ, 0, 0, -λ, 0],
[ 0, σ, 0, 0, 0, 0, 0]]
def roundup(x, places):
return int(math.ceil(x / int(places))) * int(places)
Λ = 0.0 # Birth rate
μ = 0.0 # Death rate
# Λ = 0.01 # Birth rate
# μ = 0.0205 # Death rate
Tc = 2.0 # Typical time between contacts
# β = 0.5944 #1.0/Tc
β = 1.68
# Tr = 11.1 # Typical time until recovery
Tinfs = [2.9, 2.3, 2.3, 2.9, 10.0, 1.5]
# Tr = sum(Tinfs)/len(Tinfs) #5.0
# Tr = 11.1
Tr = 14.0
γ = 1.0/Tr
Tincs = [5.2, 5.2, 6.1, 5.5, 4.8, 5.0, 6.5, 4.8]
Tinc = sum(Tincs)/len(Tincs)
σ = Tinc**-1
# σ = 3.0**-1
F = 10
α = 0.0
# α = 0.05
# α = 0.4239
# α = 0.8478
d = 0.05
# k = 1117.3
# k = 200
k = 0
λb = 11.2
λ = λb**-1
Infi = 10 # Initial infected
Daysnn = 150
NP = 329436928 # 1437904257
S0 = NP - Infi
its = 10000
itern = Daysnn/its
Days = [0.0, Daysnn]
Time = [i for i in range(0, int(Daysnn + 1), 1)]
tt = list(range(0,its,1))
Time_f = [i*itern for i in tt]
Y0 = [NP, 0.0, Infi, 0.0, NP, d, Infi]
Ro = R0b(β, γ, σ, μ)
# print(Ro)
# print('Λ')
# print('μ')
# print(α)
# print(β)
# print(Ro, 1.68**-1)
# print(λ)
# print(σ)
answer = solve_ivp(SEIR, Days, Y0, t_eval=Time_f, method = 'Radau', args=(σ, β, γ, μ, Λ, F, α, d, k, λ), jac=jacobian, rtol=1E-10, atol=1E-10)
ts = answer.t
Bs = [Beta(σ, β, i, j, k) for i,j in zip(answer.y[5],answer.y[4])]
Sn = answer.y[0]
En = answer.y[1]
In = answer.y[2]
Rn = answer.y[3]
Nn = answer.y[4]
Dn = answer.y[5]
Cn = answer.y[6]
Spb = answer.y[0]/NP
Epb = answer.y[1]/NP
Ipb = answer.y[2]/NP
Rpb = answer.y[3]/NP
Npb = answer.y[4]/NP
Dpb = answer.y[5]/NP
Cpb = answer.y[6]/NP
Sp = [i*100.0 for i in Spb]
Ep = [i*100.0 for i in Epb]
Ip = [i*100.0 for i in Ipb]
Rp = [i*100.0 for i in Rpb]
Np = [i*100.0 for i in Npb]
Dp = [i*100.0 for i in Dpb]
Cp = [i*100.0 for i in Cpb]
m = max(In)
mi = (In.tolist()).index(max(In))
mip = mi/its
peakn = round(Daysnn*mip)
my = max(Ip)
myi = (Ip).index(max(Ip))
myp = myi/its
peakyn = round(Daysnn*myp)
PEAK = [int(round(Daysnn*(mi/its)))]
nPEAK = np.array(PEAK, ndmin=2)
Tdata = np.array((Time_f, Sn, En, In, Rn))
TTdata = np.array((Time_f, Spb, Epb, Ipb, Rpb, Sp, Ep, Ip, Rp))
Tdatal = Tdata.tolist()
if its <= 16384:
writer = pd.ExcelWriter(r'{}\SIR Population.xlsx'.format(path_fol), engine='xlsxwriter')
writerp = pd.ExcelWriter(r'{}\SIR Percent.xlsx'.format(path_fol), engine='xlsxwriter')
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
}
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
pd.to_datetime("2021"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
},
{
"start_time": pd.to_datetime("2021"),
"end_time": pd.to_datetime("2021"),
},
],
),
],
)
def test_finds_correct_edges(self, input_equilibration_status, expected_boundaries):
parsed_equilibration_boundaries = module.get_equilibration_boundaries(
equilibration_status=pd.Series(input_equilibration_status)
)
expected_equilibration_boundaries = pd.DataFrame(
expected_boundaries,
columns=["start_time", "end_time"],
dtype="datetime64[ns]",
).reset_index(
drop=True
) # Coerce to a RangeIndex when creating empty DataFrame
pd.testing.assert_frame_equal(
parsed_equilibration_boundaries, expected_equilibration_boundaries
)
class TestPivotProcessExperimentResults:
def test_combines_image_rows_by_ROI(self):
test_process_experiment_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_process_experiment_result.csv"
)
test_process_experiment_data = pd.read_csv(
test_process_experiment_file_path, parse_dates=["timestamp"]
)
pivot_results = module.pivot_process_experiment_results_on_ROI(
experiment_df=test_process_experiment_data,
ROI_names=list(test_process_experiment_data["ROI"].unique()),
pivot_column_names=["r_msorm", "g_msorm"],
)
expected_results_data = (
pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"ROI 0 r_msorm": 0.5,
"ROI 1 r_msorm": 0.4,
"ROI 0 g_msorm": 0.4,
"ROI 1 g_msorm": 0.5,
"image": "image-0.jpeg",
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:02"),
"ROI 0 r_msorm": 0.3,
"ROI 1 r_msorm": 0.6,
"ROI 0 g_msorm": 0.6,
"ROI 1 g_msorm": 0.3,
"image": "image-1.jpeg",
},
]
)
.set_index("timestamp")
.astype(pivot_results.dtypes)
)
pd.testing.assert_frame_equal(pivot_results, expected_results_data)
class TestOpenAndCombineProcessExperimentResults:
def test_keeps_distinct_rows_for_images_with_same_timestamp(self):
test_process_experiment_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_process_experiment_result.csv"
)
# Open the same file twice to ensure there are duplicate timestamps
pivot_results = module.open_and_combine_process_experiment_results(
process_experiment_result_filepaths=[
test_process_experiment_file_path,
test_process_experiment_file_path,
]
)
unique_timestamps = pivot_results.index.unique()
assert len(unique_timestamps) == len(pivot_results) / 2
class TestFilterEquilibratedImages:
def test_returns_only_equilibrated_images(self):
test_roi_data = pd.DataFrame(
[
{"timestamp": pd.to_datetime("2019-01-01"), "image": "image-0.jpeg"},
{"timestamp": pd.to_datetime("2019-01-03"), "image": "image-1.jpeg"},
]
).set_index("timestamp")
test_equilibration_boundaries = pd.Series(
{
"start_time": pd.to_datetime("2019-01-02"),
"end_time": pd.to_datetime("2019-01-04"),
}
)
equilibrated_image_data = module.filter_equilibrated_images(
equilibration_range=test_equilibration_boundaries, df=test_roi_data
)
expected_equilibrated_image_data = test_roi_data[1:]
pd.testing.assert_frame_equal(
equilibrated_image_data, expected_equilibrated_image_data
)
class TestGetImagesByExperiment:
def test_combines_experiment_metadata_correctly(self, mocker):
mock_image_data = pd.DataFrame(
{
"experiment_name": [
sentinel.experiment_1,
sentinel.experiment_1,
sentinel.experiment_2,
],
"image_filename": [
sentinel.image_1,
sentinel.image_2,
sentinel.image_3,
],
}
)
mocker.patch.object(
module, "get_all_experiment_image_filenames", return_value=mock_image_data
)
mocker.patch.object(
module,
"datetime_from_filename",
side_effect=[
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:02"),
pd.to_datetime("2019-01-01 00:00:03"),
],
)
test_experiment_metadata = pd.Series(
{
"experiment_names": [sentinel.experiment_1, sentinel.experiment_2],
"cartridge_id": sentinel.cartridge_id,
"cosmobot_id": sentinel.cosmobot_id,
"pond": sentinel.pond,
}
)
actual_images_with_metadata = module.get_all_attempt_image_filenames(
test_experiment_metadata
)
expected_images_with_metadata = pd.DataFrame(
{
"timestamp": [
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:02"),
'''
Implements the covariance matrix from Price and Rogers, 2014.
Adapted from Julia source at https://github.com/ExoJulia/ExoplanetsSysSim.jl/blob/master/src/transit_observations.jl.
'''
import numpy as np
from constants import kepler_texp as texp
from constants import LC_rate
from constants import eps
from constants import minmeanmax
from utils import get_catalog, get_snr
import warnings
def make_pd(matrix):
'''
Shifts the matrix diagonal (a ridge-style correction) to make it positive definite.
Arguments
---------
matrix : np.ndarray
The matrix to change.
Returns
-------
matrix_pd : np.ndarray
The matrix with the smallest eigenvalue shifted.
'''
matrix = np.nan_to_num(matrix)
smallest_eig = np.min(np.linalg.eigvals(matrix))
if smallest_eig > 0:
return matrix
else:
return matrix + (1 + eps) * abs(smallest_eig) * np.identity(matrix.shape[0])
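# Illustrative example (not in the original module): a matrix with a negative eigenvalue gets
# its diagonal shifted just past that eigenvalue's magnitude.
#
#   m = np.array([[0.0, 1.0], [1.0, 0.0]])   # eigenvalues -1 and +1
#   make_pd(m)                               # [[1, 1], [1, 1]] plus a tiny eps on the diagonal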
def make_cov(delta, T, tau, period, num_tr, snr, sigma, diagonal=False):
'''
Makes the covariance matrix as in Equation A8 from Price and Rogers, 2014.
cov is the covariance matrix of {t_c, tau, T, delta}.
Arguments
---------
delta, T, tau, period, num_tr, snr : scalar
Transit parameters.
delta - transit depth
T - FWHM transit duration
tau - ingress/egress duration
period - period of transit
num_tr - number of transits
snr - signal-to-noise ratio of the transit
sigma : scalar
Noise scale factor.
diagonal : boolean
Whether to keep only diagonal elements.
'''
if len(delta) > 1:
cov_shape = (4, 4, len(delta))
else:
cov_shape = (4, 4)
gamma = LC_rate * num_tr
tau3 = tau ** 3
texp3 = texp ** 3
a1 = (10*tau3+2*texp ** 3-5*tau*texp ** 2)/tau3
a2 = (5*tau3+texp3-5*tau*tau*texp)/tau3
a3 = (9*texp ** 5*period-40*tau3*texp*texp*period+120*tau ** 4*texp*(3*period-2*tau))/tau ** 6
a4 = (a3*tau ** 5+texp ** 4*(54*tau-35*period)-12*tau*texp3*(4*tau+period)+360*tau ** 4*(tau-period))/tau ** 5
a5 = (a2*(24*T*T*(texp-3*tau)-24*T*period*(texp-3*tau))+tau3*a4)/tau3
a6 = (3*tau*tau+T*(texp-3*tau))/(tau*tau)
a7 = (-60*tau ** 4+12*a2*tau3*T-9*texp ** 4+8*tau*texp3+40*tau3*texp)/(tau ** 4)
a8 = (2*T-period)/tau
a9 = (-3*tau*tau*texp*(-10*T*T+10*T*period+texp*(2*texp+5*period))-texp ** 4*period+8*tau*texp3*period)/(tau ** 5)
a10 = ((a9+60)*tau*tau+10*(-9*T*T+9*T*period+texp*(3*texp+period))-75*tau*period)/(tau*tau)
a11 = (texp*period-3*tau*(period-2*tau))/(tau*tau)
a12 = (-360*tau ** 5-24*a2*tau3*T*(texp-3*tau)+9*texp ** 5-35*tau*texp ** 4-12*tau*tau*texp3-40*tau3*texp*texp+360*tau ** 4*texp)/(tau ** 5)
a13 = (-3*texp3*(8*T*T-8*T*period+3*texp*period)+120*tau*tau*T*texp*(T-period)+8*tau*texp3*period)/tau ** 5
a14 = (a13*tau*tau+40*(-3*T*T+3*T*period+texp*period)-60*tau*period)/(tau*tau)
a15 = (2*texp-6*tau)/tau
b1 = (6*texp*texp-3*texp*period+tau*period)/(texp*texp)
b2 = (tau*T+3*texp*(texp-T))/(texp*texp)
b3 = (tau3-12*T*texp*texp+8*texp3+20*tau*texp*texp-8*tau*tau*texp)/texp3
b4 = (6*T*T-6*T*period+texp*(5*period-4*texp))/(texp*texp)
b5 = (10*texp-3*tau)/texp
b6 = (12*b4*texp3+4*tau*(-6*T*T+6*T*period+texp*(13*period-30*texp)))/texp3
b7 = (b6*texp ** 5+4*tau*tau*texp*texp*(12*texp-11*period)+tau3*texp*(11*period-6*texp)-tau ** 4*period)/texp ** 5
b8 = (3*T*T-3*T*period+texp*period)/(texp*texp)
b9 = (8*b8*texp ** 4+20*tau*texp*texp*period-8*tau*tau*texp*period+tau3*period)/texp ** 4
b10 = (-tau ** 4+24*T*texp*texp*(tau-3*texp)+60*texp ** 4+52*tau*texp3-44*tau*tau*texp*texp+11*tau3*texp)/texp ** 4
b11 = (-15*b4*texp3+10*b8*tau*texp*texp+15*tau*tau*(2*texp-period))/texp3
b12 = (b11*texp ** 5+2*tau3*texp*(4*period-3*texp)-tau ** 4*period)/texp ** 5
b13 = (period-2*T)/texp
b14 = (6*texp-2*tau)/texp
Q = snr/np.sqrt(num_tr)
tmax = np.maximum(tau, texp)
sigma_t0 = np.sqrt(0.5*tmax*T/(1-texp/(3*tmax)))/Q
sigma_period = sigma_t0/np.sqrt(num_tr)
sigma_dur1 = sigma*np.sqrt(abs(6*tau*a14/(delta*delta*a5)) / gamma)
sigma_dur2 = sigma*np.sqrt(abs(6*texp*b9/(delta*delta*b7)) / gamma)
choose_1_where = tau >= texp
sigma_duration = sigma_dur1 * choose_1_where + sigma_dur2 * (1 - choose_1_where)
sigma_dep1 = sigma*np.sqrt(abs(-24*a11*a2/(tau*a5)) / gamma)
sigma_dep2 = sigma*np.sqrt(abs(24*b1/(texp*b7)) / gamma)
sigma_depth = sigma_dep1 * choose_1_where + sigma_dep2 * (1 - choose_1_where)
if diagonal: # Assume uncertainties uncorrelated (Diagonal)
return np.array([
make_pd(np.diag([sigma_t0[i], sigma_period[i], sigma_duration[i], sigma_depth[i]])) for i in range(len(sigma_t0))
])
else:
warnings.warn("this is currently not the same covariance as when diagonal=True: covariance is of {tau, T, delta, f0}")
warnings.warn("dimensions here are for the scalar case")
cov = np.zeros((4, 4))
if tau>=texp:
cov[0,0] = 24*tau*a10/(delta*delta*a5)
cov[0,1] = 36*a8*tau*a1/(delta*delta*a5)
cov[1,0] = cov[0,1]
cov[0,2] = -12*a11*a1/(delta*a5)
cov[2,0] = cov[0,2]
cov[0,3] = -12*a6*a1/(delta*a5)
cov[3,0] = cov[0,3]
cov[1,1] = 6*tau*a14/(delta*delta*a5)
cov[1,2] = 72*a8*a2/(delta*a5)
cov[2,1] = cov[1,2]
cov[1,3] = 6*a7/(delta*a5)
cov[3,1] = cov[1,3]
cov[2,2] = -24*a11*a2/(tau*a5)
cov[2,3] = -24*a6*a2/(tau*a5)
cov[3,2] = cov[2,3]
cov[3,3] = a12/(tau*a5)
else:
cov[0,0] = -24*texp*texp*b12/(delta*delta*tau*b7)
cov[0,1] = 36*texp*b13*b5/(delta*delta*b7)
cov[1,0] = cov[0,1]
cov[0,2] = 12*b5*b1/(delta*b7)
cov[2,0] = cov[0,2]
cov[0,3] = 12*b5*b2/(delta*b7)
cov[3,0] = cov[0,3]
cov[1,1] = 6*texp*b9/(delta*delta*b7)
cov[1,2] = 72*b13/(delta*b7)
cov[2,1] = cov[1,2]
cov[1,3] = 6*b3/(delta*b7)
cov[3,1] = cov[1,3]
cov[2,2] = 24*b1/(texp*b7)
cov[2,3] = 24*b2/(texp*b7)
cov[3,2] = cov[2,3]
cov[3,3] = b10/(texp*b7)
cov *= sigma*sigma/gamma
cov = make_pd(cov)
return cov
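# Hypothetical usage sketch (not in the original module): all transit parameters are equal-length
# arrays; with diagonal=True the result is a stack of 4x4 diagonal covariance matrices of
# {t_c, period, duration, depth}. The numbers below are placeholders, not fitted values.
#
#   delta = np.array([1e-3]); T = np.array([0.2]); tau = np.array([0.02])
#   period = np.array([10.0]); num_tr = np.array([30.0]); snr = np.array([15.0])
#   covs = make_cov(delta, T, tau, period, num_tr, snr, sigma=1e-4, diagonal=True)
#   covs.shape   # (1, 4, 4)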
if __name__ == "__main__":
# for the sake of making this quick demo independent of the other files, I'll repeat a couple of functions
import os
import pandas as pd
import scipy.stats as stats
import sys
sys.path.append('..')
re = 0.009158
def get_a(period, mstar, Go4pi=2945.4625385377644/(4*np.pi*np.pi)):
return (Go4pi*period*period*mstar) ** (1./3)
kois = get_catalog('q1_q16_koi')
stellar = get_catalog('q1_q16_stellar')
kois = kois[kois["kepid"].isin(stellar["kepid"])]
kois = kois[np.isfinite(kois["koi_prad"])]
stellar = stellar[np.isfinite(stellar.mass)]
combined = pd.merge(kois, stellar, on='kepid')
import unittest
from helpers import split_rows, merge_duplicate_taxa, get_date
import pandas as pd
from pandas._testing import assert_frame_equal, assert_series_equal
from unittest.mock import patch
from uuid import uuid4
import uuid
class GetSensibleDate(unittest.TestCase):
def test_constructs_date(self):
year, month, day = pd.Series(['19', '20']), pd.Series(['09', '11']), pd.Series(['01', '02'])
expected = pd.Series(['2019-09-01', '2020-11-02'])
assert_series_equal(get_date(year, month, day), expected)
def test_handles_1990s(self):
year, month, day = pd.Series(['98', '10']), pd.Series(['09', '11']), pd.Series(['01', '02'])
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
self.mam3 = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
self.mam4 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
self.nitro_nmr = Fchk(resource('g16-nitromalonamide-6-31++g-nmr.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.atom))))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.atom))))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set))))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set))))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.orbital))))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.orbital))))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.momatrix))))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.momatrix))))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set_order))))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set_order))))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.frame))))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 20:22:15 2020
@author:
"""
class Comparison:
def __init__(self):
super().__init__()
#The goal of this function is to execute the models and show the different results.
#It is the function to call when we want to test different models
#with different values for the parameters
def run_comparison(self, stream, stream_n_features, window = 100,
estimators = 50, anomaly = 0.5, drift_rate = 0.3,
result_folder="Generated", max_sample=100000, n_wait=200,
metrics=['accuracy', 'f1', 'kappa', 'kappa_m',
'running_time','model_size']):
from skmultiflow.anomaly_detection import HalfSpaceTrees
from source.iforestasd_scikitmultiflow import IsolationForestStream
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
# Creation f the result csv
directory_path = 'results/'+str(result_folder)
self.check_directory(path=directory_path)
result_file_path = directory_path+'/result_for_WS'+str(window)+'_NE'+str(estimators)+'.csv'
# 2. Prepare for use. This function is useful to have data window by window
# stream.prepare_for_use() # Deprecated so how to prepare data?
models = [HalfSpaceTrees(n_features=stream_n_features, window_size=window,
n_estimators=estimators, anomaly_threshold=anomaly),
#iForestASD uses the whole window_size sample in the training phase
IsolationForestStream(window_size=window, n_estimators=estimators,
anomaly_threshold=anomaly, drift_threshold=drift_rate)]
# Setup the evaluator
evaluator = EvaluatePrequential(pretrain_size=1, max_samples=max_sample,
show_plot=True,
metrics=metrics, batch_size=1,
output_file = result_file_path,
n_wait = n_wait)
# 4. Run the evaluation
evaluator.evaluate(stream=stream, model=models, model_names=['HSTrees','iForestASD'])
print("")
print("Please find evaluation results here "+result_file_path)
return
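# Hypothetical usage sketch (not part of the original class):
#
#   comp = Comparison()
#   stream = comp.get_dataset("Generator")   # SEA generator stream with 3 numeric features
#   comp.run_comparison(stream, stream_n_features=3, window=100, estimators=30,
#                       max_sample=5000, result_folder="demo")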
def get_dataset(self, dataset_name="Generator", classification_function=0,
noise_percentage=0.7, random_state=1):
# Dataset reference:
# Name         M(#instances)  N(#attributes)  Anomaly threshold
# Http         567498         3               0.39%
# Smtp         95156          3               0.03%
# ForestCover  286048         10              0.96%
# Shuttle      49097          9               7.15%
if dataset_name=="Generator":
return self.get_data_generated(classification_function,
noise_percentage, random_state);
elif dataset_name=="HTTP":
path = "datasets/HTTP.csv"
return self.get_file_stream(path);
elif dataset_name=="ForestCover":
path = "datasets/ForestCover.csv"
return self.get_file_stream(path);
elif dataset_name=="Shuttle":
path = "datasets/Shuttle.csv"
return self.get_file_stream(path);
elif dataset_name=="SMTP":
path = "datasets/SMTP.csv"
return self.get_file_stream(path);
else:
print("The specified dataset do not exist yet."+
" Try to contact the administrator for any add. "+
" Or choose between these datasets:['Generator','HTTP','ForestCover','Shuttle','SMTP']");
return None
def get_file_stream(self, path):
from skmultiflow.data.file_stream import FileStream
return FileStream(path, n_targets=1, target_idx=-1)
def get_data_stream(self, path):
from skmultiflow.data.data_stream import DataStream
return
def get_data_generated(self,classification_function, noise_percentage, random_state):
from skmultiflow.data import SEAGenerator
return SEAGenerator(classification_function=classification_function,
noise_percentage=noise_percentage, random_state=random_state)
#To transform datasets by replacing the anomaly label with 1 and the normal label with 0
def prepare_dataset_for_anomaly(self, full_dataset, y_column:int,
anomaly_label:str='\'Anomaly\'', file_name:str="new"):
import numpy as np
import pandas as pd
full_dataset[y_column] = np.where(full_dataset[y_column]==anomaly_label,1,0)
dataset = pd.DataFrame(full_dataset)
dataset.drop([0], inplace=True)
full_file_path = "../datasets/"+file_name+".csv"
dataset.to_csv(full_file_path, index=None, header=True)
return dataset
def check_directory(self,path):
from pathlib import Path
Path(path).mkdir(parents=True, exist_ok=True)
def merge_file(self, folder_path, output_file = 'output.csv'):
import os
import pandas as pd
result = pd.DataFrame()
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/10/14 12:19
Desc: CNINFO (cninfo.com.cn) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
"""
import time
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
js_str = """
function mcode(input) {
var keyStr = "<KEY> <KEY>;
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def bond_treasure_issue_cninfo(
start_date: str = "20210910", end_date: str = "20211109"
) -> pd.DataFrame:
"""
CNINFO - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Treasury Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: start date of the statistics window
:type start_date: str
:param end_date: end date of the statistics window
:type end_date: str
:return: treasury bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1120"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_local_government_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
CNINFO - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Local Government Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: start date of the statistics window
:type start_date: str
:param end_date: end date of the statistics window
:type end_date: str
:return: local government bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1121"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_corporate_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
CNINFO - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Corporate Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: start date of the statistics window
:type start_date: str
:param end_date: end date of the statistics window
:type end_date: str
:return: corporate bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1122"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "交易所网上发行终止日",
"F003D": "交易所网上发行起始日",
"F008N": "发行面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "实际发行总量",
"F005N": "计划发行总量",
"F022N": "最小认购单位",
"F017V": "承销方式",
"F052N": "最低认购额",
"F015V": "发行范围",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F013V": "发行方式",
"F023V": "募资用途说明",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"公告日期",
"交易所网上发行起始日",
"交易所网上发行终止日",
"计划发行总量",
"实际发行总量",
"发行面值",
"发行价格",
"发行方式",
"发行对象",
"发行范围",
"承销方式",
"最小认购单位",
"募资用途说明",
"最低认购额",
"债券名称",
]
]
temp_df["公告日期"] = pd.to_da | tetime(temp_df["公告日期"]) | pandas.to_datetime |
# url: https://www.kaggle.com/ash316/eda-to-prediction-dietanic
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("fivethirtyeight")
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
data = pd.read_csv("./titanic/train.csv")
data.head()
# checking for total null values
# The Age, Cabin and Embarked have null values. I will try to fix them
data.isnull().sum()
# How many survived?
f, ax = plt.subplots(1, 2, figsize=(18,8))
data["Survived"].value_counts().plot.pie(explode=[0,0.1], autopct="%1.1f%%", ax=ax[0], shadow=True)
ax[0].set_title("Survived")
ax[0].set_ylabel("")
sns.countplot("Survived", data=data, ax=ax[1])
ax[1].set_title("Survived")
plt.show()
# It is evident that not many passengers survived the accident
# Out of the 891 passengers in the training set, only around 350 survived, i.e.
# only 38.4% of the total training set survived the crash.
# We need to dig deeper to get better insights from the data and see which categories of passengers survived and which didn't.
# We will try to check the survival rate by using the different features of the dataset.
# Some of the features being Sex, Port Of Embarcation, Age, etc.
# First let us understand the different types of features.
# > Types of Features
# >> Categorical Features:
# A categorical variable is one that has two or more categories and each value in that feature can be categorised by them.
# For example, gender is a categorical variable having two categories (male and female).
# Now we cannot sort or give any ordering to such variables.
# They are also known as "Nominal Variables".
# Categorical Features in the dataset: Sex, Embarked
# >> Ordinal Features:
# An ordinal variable is similar to a categorical one, but the difference between them is that we can have a relative ordering or sorting between the values.
# For eg: If we have a feature like Height with values Tall, Medium, Short, then Height is an ordinal variable.
# Here we can have a relative sort in the variable.
# Ordinal Features in the dataset: PClass
# >> Continuous Feature:
# A feature is said to be continuous if it can take values between any two points or between the minimum and maximum values in the feature column.
# Continuous Features in the dataset: Age
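# A quick way to eyeball these feature types on the raw frame (a small illustrative snippet,
# not part of the original notebook):
print(data[["Sex", "Embarked", "Pclass", "Age"]].dtypes)
print(data[["Sex", "Embarked", "Pclass"]].nunique())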
# Analysing the Features
# Sex --> Categorical Feature
data.groupby(["Sex", "Survived"])["Survived"].count()
f, ax = plt.subplots(1, 2, figsize=(18,8))
data[["Sex", "Survived"]].groupby(["Sex"]).mean().plot.bar(ax=ax[0])
ax[0].set_title("Survived vs Sex")
sns.countplot("Sex", hue="Survived", data=data, ax=ax[1])
ax[1].set_title("Sex: Survived vs Dead")
plt.show()
# This looks interesting.
# The number of men on the ship is a lot higher than the number of women.
# Still, the number of women saved is almost twice the number of males saved.
# The survival rate for women on the ship is around 75%, while that for men is around 18-19%.
# This looks to be a very important feature for modeling. But is it the best?
# Lets check other features.
# Pclass --> Ordinal Feature
pd.crosstab(data.Pclass, data.Survived, margins=True)#.style.background_gradient(cmap="summer_r")
f, ax=plt.subplots(1, 2, figsize=(18,8))
data["Pclass"].value_counts().plot.bar(color=["#CD7F32", "#FFDF00", "#D3D3D3"], ax=ax[0])
ax[0].set_title("Number of Passengers by Pclass")
ax[0].set_ylabel("Count")
sns.countplot("Pclass", hue="Survived", data=data, ax=ax[1])
ax[1].set_title("Pclass: Survived vs Dead")
plt.show()
# People say Money Can't Buy Everything.
# But we can clearly see that Passengers of Pclass 1 were given a very high priority while rescue.
# Even though the number of passengers in Pclass 3 was a lot higher, the number of survivors among them is still very low, at around 25%.
# For Pclass 1 the survival rate is around 63%, while for Pclass 2 it is around 48%.
# So money and status matters. Such a materialistic world.
# Let's dive in a little bit more and check for other interesting observations.
# Let's check the survival rate with Sex and Pclass together.
pd.crosstab(data["Sex"], data["Survived"], margins=True)
sns.catplot("Pclass", "Survived", hue="Sex", data=data, kind="point")
plt.show()
# We use catplot (a.k.a. factorplot) in this case, because it makes the separation of categorical values easy.
# Looking at the CrossTab and the FactorPlot, we can easily infer that survival for Women from Pclass1 is about 95-96%, as only 3 out of 94 Women from Pclass1 died.
# It is evident that irrespective of Pclass, Women were given first priority while rescue.
# Even Men from Pclass1 have a very low survival rate.
# Looks like Pclass is also an important feature. Let's analyse other features.
# > Age --> Continuous Feature
print("Oldest Passenger was of:", data["Age"].max(), "Years")
print("Youngest Paseenger was of:", data["Age"].min(), "Years")
print("Average Age on the ship:", data["Age"].mean(), "Years")
f, ax = plt.subplots(1, 2, figsize=(18,8))
sns.violinplot("Pclass", "Age", hue="Survived", data=data, split=True, ax=ax[0])
ax[0].set_title("Pclass and Age vs Survived")
ax[0].set_yticks(range(0,110,10))
sns.violinplot("Sex", "Age", hue="Survived", data=data, split=True, ax=ax[1])
ax[1].set_title("Sex and Age vs Survived")
ax[1].set_yticks(range(0, 110, 10))
plt.show()
# Observations:
# 1) The number of children increases with Pclass, and the survival rate for passengers below age 10 (i.e. children) looks good irrespective of the Pclass.
# 2) Survival chances for passengers aged 20-50 from Pclass 1 are high, and even better for women.
# 3) For males, the survival chances decrease with an increase in age.
# As we had seen earlier, the Age feature has 177 null values.
# To replace these NaN values, we can assign them the mean age of the dataset.
# But the problem is there were many people with many different ages.
# We just can't assign a 4-year-old kid the mean age of 29 years.
# Is there any way to find out what age band the passenger lies in?
# Bingo!! We can check the Name feature.
# Looking at the feature, we can see that the names have a salutation like Mr or Mrs.
# Thus we can assign the mean values of Mr and Mrs to the respective groups.
# What's in A Name?? --> Feature :p
data["Initial"]=0
# extract the salutation from each name (the redundant per-column loop in the original notebook is unnecessary)
data["Initial"] = data.Name.str.extract(r"([A-Za-z]+)\.")
# Okay, so here we are using the regex: ([A-Za-z]+)\.
# What it does is look for alphabetic strings (A-Z or a-z) that are followed by a . (dot).
# So we successfully extract the initials from the Name.
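# Quick sanity check of the pattern on a sample name string (illustrative only):
pd.Series(["Braund, Mr. Owen Harris"]).str.extract(r"([A-Za-z]+)\.")  # -> "Mr"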
pd.crosstab(data.Initial, data.Sex)#.T
# Okay so there are some misspelled initials like Mlle or Mme that stand for Miss.
# I will replace them with Miss, and do the same for the other values.
data["Initial"].replace(["Mlle", "Mme", "Ms", "Dr", "Major", "Lady", "Countess", "Jonkheer", "Col", "Rev", "Capt", "Sir", "Don"],
["Miss", "Miss", "Miss", "Mr", "Mr", "Mrs", "Mrs", "Other", "Other", "Other", "Mr", "Mr", "Mr"], inplace=True)
data.groupby("Initial")["Age"].mean()
# Filling NaN Ages
data.loc[(data.Age.isnull())&(data.Initial=="Mr"), "Age"] = 33
data.loc[(data.Age.isnull())&(data.Initial=="Mrs"), "Age"] = 36
data.loc[(data.Age.isnull())&(data.Initial=="Master"), "Age"] = 5
data.loc[(data.Age.isnull())&(data.Initial=="Miss"), "Age"] = 22
data.loc[(data.Age.isnull())&(data.Initial=="Other"), "Age"] = 46
data.Age.isnull().any()
f, ax = plt.subplots(1, 2, figsize=(20,10))
data[data["Survived"]==0].Age.plot.hist(ax=ax[0], bins=20, edgecolor="black", color="red")
ax[0].set_title("Survived=0")
x1 = list(range(0, 85, 5))
ax[0].set_xticks(x1)
data[data["Survived"]==1].Age.plot.hist(ax=ax[1], color="green", bins=20, edgecolor="black")
ax[1].set_title("Survived=1")
x2 = list(range(0, 85, 5))
ax[1].set_xticks(x2)
plt.show()
# Observations:
# 1) The Toddlers (age<5) were saved in large numbers (The Women and Child First Policy)
# 2) The oldest passenger was saved (80 years).
# 3) Maximum number of deaths were in the age group of 30-40.
sns.catplot("Pclass", "Survived", col="Initial", kind="point", data=data)
plt.show()
# The Women and Child first policy thus holds true irrespective of the class.
# >> Embarked --> Categorical value
| pd.crosstab([data.Embarked, data.Pclass], [data.Sex, data.Survived], margins=True) | pandas.crosstab |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype, is_string_dtype
from pandas.tseries.offsets import Tick, BusinessDay, Week, MonthEnd
from pandas.tseries.frequencies import to_offset
from timeseries_preparation.h2g2 import H2G2
from safe_logger import SafeLogger
logger = SafeLogger("Forecast plugin")
class TimeseriesPreparator:
"""
Class to check the timeseries has the right data and prepare it to have regular date interval
Attributes:
time_column_name (str): Name of the time column
        frequency (str): Pandas timeseries frequency (e.g. '3M')
target_columns_names (list, optional): List of column names to predict
timeseries_identifiers_names (list, optional): Columns to identify multiple time series when data is in long format
external_features_columns_names (list, optional): List of columns with dynamic real features over time
max_timeseries_length (int, optional): Maximum number of records to keep in the timeseries
timeseries_identifiers_values (list, optional): List of dict with timeseries identifiers name as keys and timeseries identifiers values as values
prediction_length (int, optional): Number of records to predict
"""
def __init__(
self,
time_column_name,
frequency,
target_columns_names=None,
timeseries_identifiers_names=None,
external_features_columns_names=None,
max_timeseries_length=None,
timeseries_identifiers_values=None,
prediction_length=None,
):
self.time_column_name = time_column_name
self.frequency = frequency
self.target_columns_names = target_columns_names
self.timeseries_identifiers_names = timeseries_identifiers_names
self.external_features_columns_names = external_features_columns_names
self.max_timeseries_length = max_timeseries_length
self.timeseries_identifiers_values = timeseries_identifiers_values
self.prediction_length = prediction_length
def prepare_timeseries_dataframe(self, dataframe):
"""Convert time column to pandas.Datetime without timezones. Truncate dates to selected frequency.
Check that there are no duplicate dates and that there are no missing dates.
Sort timeseries. Keep only the most recent dates of each timeseries if specified.
Args:
dataframe (DataFrame)
Raises:
ValueError: If the time column cannot be parsed as a date by pandas.
Returns:
Prepared timeseries
"""
self._check_data(dataframe)
dataframe_prepared = dataframe.copy()
try:
dataframe_prepared[self.time_column_name] = pd.to_datetime(dataframe[self.time_column_name]).dt.tz_localize(
tz=None
)
except Exception:
raise ValueError(f"Please parse the date column '{self.time_column_name}' in a Prepare recipe")
dataframe_prepared = self._truncate_dates(dataframe_prepared)
dataframe_prepared = self._sort(dataframe_prepared)
self._check_regular_frequency(dataframe_prepared)
log_message_prefix = "Found"
self._log_timeseries_lengths(dataframe_prepared, log_message_prefix=log_message_prefix)
if self.max_timeseries_length:
dataframe_prepared = self._keep_last_dates(dataframe_prepared)
log_message_prefix = f"Sampling {self.max_timeseries_length} last records, obtained"
self._log_timeseries_lengths(dataframe_prepared, log_message_prefix=log_message_prefix)
if self.timeseries_identifiers_names:
if self.timeseries_identifiers_values:
self._check_identifiers_values(dataframe_prepared)
else:
self.timeseries_identifiers_values = (
dataframe_prepared[self.timeseries_identifiers_names].drop_duplicates().to_dict("records")
)
return dataframe_prepared
def check_schema_from_dataset(self, dataset_schema):
dataset_columns = [column["name"] for column in dataset_schema]
expected_columns = (
[self.time_column_name]
+ (self.target_columns_names or [])
+ (self.timeseries_identifiers_names or [])
+ (self.external_features_columns_names or [])
)
if not set(expected_columns).issubset(set(dataset_columns)):
raise ValueError(f"Dataset of historical data must contain the following columns: {expected_columns}")
def serialize(self):
return dict(
time_column_name=self.time_column_name,
frequency=self.frequency,
target_columns_names=self.target_columns_names,
timeseries_identifiers_names=self.timeseries_identifiers_names,
external_features_columns_names=self.external_features_columns_names,
max_timeseries_length=self.max_timeseries_length,
timeseries_identifiers_values=self.timeseries_identifiers_values,
prediction_length=self.prediction_length,
)
@classmethod
def deserialize(cls, parameters):
return cls(**parameters)
def _check_identifiers_values(self, dataframe):
historical_timeseries_identifiers_values = (
dataframe[self.timeseries_identifiers_names].drop_duplicates().to_dict("records")
)
if (
len(
[
identifiers_dict
for identifiers_dict in historical_timeseries_identifiers_values
if identifiers_dict not in self.timeseries_identifiers_values
]
)
> 0
):
raise ValueError(
f"Dataset of historical data must only contain timeseries identifiers values that were used during training.\n"
+ f"Historical data contains: {historical_timeseries_identifiers_values}.\n"
+ f"Training data contains: {self.timeseries_identifiers_values}."
)
def _check_data(self, df):
self._check_not_empty_dataframe(df)
self._check_timeseries_identifiers_columns_types(df)
self._check_target_columns_types(df)
self._check_external_features_columns_types(df)
self._check_no_missing_values(df)
def _truncate_dates(self, df):
"""Truncate dates to selected frequency. For Week/Month/Year, truncate to end of Week/Month/Year.
Check there are no duplicate dates.
Examples:
'2020-12-15 12:45:30' becomes '2020-12-15 12:40:00' with frequency '20min'
'2020-12-15 12:00:00' becomes '2020-12-15 00:00:00' with frequency '24H'
'2020-12-15 12:30:00' becomes '2020-12-15 00:00:00' with frequency 'D'
'2020-12-15 12:30:00' becomes '2020-12-31 00:00:00' with frequency 'M'
'2020-12-15 12:30:00' becomes '2021-12-31 00:00:00' with frequency '6M'
Args:
df (DataFrame): Dataframe in wide or long format with a time column.
Raises:
            ValueError: If there are duplicate dates before or after truncation.
Returns:
Sorted DataFrame with truncated dates.
"""
df_truncated = df.copy()
error_message_suffix = (
". Please check the Long format parameter." if not self.timeseries_identifiers_names else "."
)
self._check_duplicate_dates(df_truncated, error_message_suffix=error_message_suffix)
frequency_offset = | to_offset(self.frequency) | pandas.tseries.frequencies.to_offset |
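# A rough, standalone illustration of the truncation behaviour documented above (this is a sketch of
# the idea using plain pandas, not the plugin's exact implementation):
import pandas as pd
from pandas.tseries.frequencies import to_offset
stamp = pd.Timestamp("2020-12-15 12:45:30")
print(stamp.floor("20min"))                           # 2020-12-15 12:40:00
print(to_offset("M").rollforward(stamp.normalize()))  # 2020-12-31 00:00:00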
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with HDFStore(path, "r") as store:
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
read_hdf(store, "k1")
def test_read_column(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
msg = re.escape("select_column() got an unexpected keyword argument 'where'")
with pytest.raises(TypeError, match=msg):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
msg = re.escape(
"column [values_block_0] can not be extracted individually; "
"it is not data indexable"
)
with pytest.raises(ValueError, match=msg):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[df3.index[4:6], "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_pytables_native_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows")
def test_pytables_native2_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
def test_legacy_table_fixed_format_read_py2(datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_fixed_format_read_datetime_py2(datapath, setup_path):
# GH 31750
# legacy table with fixed format and datetime64 column written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
mode="r",
) as store:
result = store.select("df")
expected = DataFrame(
[[Timestamp("2020-02-06T18:00")]],
columns=["A"],
index=Index(["date"]),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_read_hdf_open_store(setup_path):
# GH10330
# No check for non-string path_or-buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
indirect = read_hdf(store, "df")
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_nokey(setup_path):
# GH10443
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a")
msg = "key must be provided when HDF5 file contains multiple datasets."
with pytest.raises(ValueError, match=msg):
read_hdf(path)
def test_read_nokey_table(setup_path):
# GH13231
df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a", format="table")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a", format="table")
msg = "key must be provided when HDF5 file contains multiple datasets."
with pytest.raises(ValueError, match=msg):
read_hdf(path)
def test_read_nokey_empty(setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path)
store.close()
msg = re.escape(
"Dataset(s) incompatible with Pandas data types, not table, or no "
"datasets found in HDF5 file."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path)
def test_read_from_pathlib_path(setup_path):
# GH11773
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
def test_read_from_py_localpath(setup_path):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_read_hdf_series_mode_r(format, setup_path):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(setup_path) as path:
series.to_hdf(path, key="data", format=format)
result = read_hdf(path, key="data", mode="r")
| tm.assert_series_equal(result, series) | pandas._testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 16:23:57 2020
@author: esteban
"""
ultimaFechaCasos='2020-05-20'
titulo='Letalidad por Región al 20 de Mayo'
fatalidad='Letalidad [%]'
data_size=fatalidad
fontsize=24
fontsizenames=30
import pandas as pd
import os
from matplotlib import font_manager as fm, rcParams
import matplotlib.pyplot as plt
import seaborn as sns
fpath = os.path.join(rcParams["datapath"],"../Montserrat-Regular.ttf")
prop = fm.FontProperties(size= 20,fname="../Montserrat-Regular.ttf")
fname = os.path.split(fpath)[1]
############################################################
# Confirmed cases by region
path='../../COVID19_Chile_Regiones-casos_totales.CSV'
casos= | pd.read_csv(path) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = | tm.box_expected(rng, box) | pandas.util.testing.box_expected |
# -*- coding: utf-8 -*-
from ..data import Data, DataSamples
from ..cross import DecisionTree, Crosses
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc, r2_score
from scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
from scipy.optimize import minimize
import copy
import itertools
import calendar
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class ScoringModel(metaclass = ABCMeta):
'''
Base class for binary scoring models
'''
@abstractmethod
def __init__(self, model):
self.model = model
self.features = []
@abstractmethod
def fit(self, data):
pass
def predict(self, data, woe_transform=None):
'''
Predicts probability of target = 1
Parameters
-----------
data: Data to use for prediction, Data type
woe_transform: a WOE object to perform WoE-transformation before using model
Returns
-----------
matrix with shape [(sample size) X (number of classes)]
'''
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
if self.features == []:
self.features = data.features
return self.model.predict_proba(data.dataframe[self.features])
def roc_curve(self, data, woe_transform=None, figsize=(10,7), filename = 'roc_curve', verbose = True):
'''
Displays ROC-curve and Gini coefficient for the model
Parameters
-----------
data: a Data or DataSamples object
woe_transform: a WOE object to perform WoE-transformation before using model
figsize: a tuple for graph size
filename: name of the picture with roc_curve
verbose: show/not show roc_curve in output
Returns
----------
a list of gini values per input sample
'''
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
tpr={}
fpr={}
roc_auc={}
if type(data)==DataSamples:
samples=[data.train, data.validate, data.test]
sample_names=['Train', 'Validate', 'Test']
for si in range(len(samples)):
if samples[si] is not None:
preds = self.predict(samples[si])[:,1]
fpr[samples[si].name], tpr[samples[si].name], _ = roc_curve(samples[si].dataframe[samples[si].target], preds)
roc_auc[samples[si].name] = auc(fpr[samples[si].name], tpr[samples[si].name])
else:
fpr[sample_names[si]]=None
tpr[sample_names[si]]=None
roc_auc[sample_names[si]]=None
else:
preds = self.predict(data)[:,1]
fpr['Data'], tpr['Data'], _ = roc_curve(data.dataframe[data.target], preds)
roc_auc['Data'] = auc(fpr['Data'], tpr['Data'])
if verbose or (filename is not None):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# Plot tpr vs 1-fpr
for sample in roc_auc:
if roc_auc[sample] is not None:
ax.plot(fpr[sample], tpr[sample], label=sample+' (AUC = %f)' % roc_auc[sample])
ax.plot(tpr[list(tpr)[0]],tpr[list(tpr)[0]])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.legend()
if filename is not None:
plt.savefig(filename + ".png", dpi=100, bbox_inches='tight')
if verbose:
plt.show()
if verbose or (filename is not None):
plt.close()
ginis=[]
for sample in roc_auc:
if roc_auc[sample] is not None:
gini = round((roc_auc[sample]*2 - 1)*100, 2)
ginis.append(gini)
if verbose:
print ('Gini '+sample, gini)
return ginis
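    # For reference, the Gini above follows the usual relation Gini = (2*AUC - 1) * 100.
    # A hypothetical call pattern (names below are illustrative, not taken from this module):
    #   model = LogisticRegressionModel(solver='lbfgs')
    #   model.fit(samples.train)
    #   ginis = model.roc_curve(samples, filename=None)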
#---------------------------------------------------------------
class DecisionTreeModel(ScoringModel):
'''
Decision tree classifier
'''
def __init__(self, **args):
self.model = DecisionTreeClassifier(**args)
self.features = []
def fit(self, data):
if data.weights != None:
self.model.fit(data.dataframe[data.features], data.dataframe[data.target], sample_weight = np.array(data.dataframe[data.weights]))
else:
self.model.fit(data.dataframe[data.features], data.dataframe[data.target])
#---------------------------------------------------------------
class LogisticRegressionModel(ScoringModel):
'''
Logistic Regression for scoring.
Contains LogisticRegressionClassifier, its coefficients and intercept, scores and scoring card.
    An object of this class can select features, fit, edit coefficients, predict probabilities, calculate scores and transform a scorecard to SAS code.
'''
def __init__(self, **args):
self.model = LogisticRegression(**args)
self.regularization = self.model.get_params()['penalty']
self.regularization_value = self.model.get_params()['C']
self.solver = self.model.get_params()['solver']
        # Check the solver type, as it matters for models fitted on weighted samples
if self.model.solver != 'sag' and self.model.solver != 'newton-cg' and self.model.solver != 'lbfgs':
print ('Warning: this model does not support sample weights! For weighted scoring please use solver sag, newton-cg or lbfgs')
self.coefs = {}
self.features = []
self.scorecard = pd.DataFrame()
self.selected = []
#added 23.08.2018 by <NAME>
def inf_criterion(self, data, model=None, features=None, criterion='AIC', woe_transform=None):
'''
Calculation of information criterion (AIC/BIC) for given model on given data
Parameters
-----------
data: data for calculation
model: model with coefficients, that will be used to calculate information criterion
features: features to be used for information criterion calculation (in case model
was not fitted using its own selected features - model.selected)
criterion: type of information criterion to calculate
woe_transform: a woe object with binning information to perform WoE-transformation
Returns
-----------
value of information criterion
'''
if features is None:
features_initial=self.selected.copy()
else:
features_initial=features.copy()
if model is None:
model_to_check=self.model
else:
model_to_check=model
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
features_kept=[]
weights_crit=[model_to_check.intercept_[0]]
for i in range(len(features_initial)):
if model_to_check.coef_[0][i]!=0:
features_kept.append(features_initial[i])
weights_crit.append(model_to_check.coef_[0][i])
intercept_crit = np.ones((data.dataframe.shape[0], 1))
features_crit = np.hstack((intercept_crit, data.dataframe[features_kept]))
scores_crit = np.dot(features_crit, weights_crit)
if data.weights is not None:
ll = np.sum(data.dataframe[data.weights]*(data.dataframe[data.target]*scores_crit - np.log(np.exp(scores_crit) + 1)))
else:
ll = np.sum(data.dataframe[data.target]*scores_crit - np.log(np.exp(scores_crit) + 1))
if criterion in ['aic', 'AIC']:
return 2*len(weights_crit)-2*ll
elif criterion in ['bic', 'BIC', 'sic', 'SIC', 'sbic', 'SBIC']:
if data.weights is not None:
return len(weights_crit)*np.log(data.dataframe[data.weights].sum())-2*ll
else:
return len(weights_crit)*np.log(data.dataframe.shape[0])-2*ll
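    # For reference, the criteria above use the standard definitions, with k = number of non-zero
    # weights (including the intercept), L = likelihood and n = (weighted) number of observations:
    #   AIC = 2*k - 2*ln(L)
    #   BIC = k*ln(n) - 2*ln(L)
    # Lower values indicate a better fit/complexity trade-off.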
#added 23.08.2018 by <NAME>
def wald_test(self, data, model=None, features=None, woe_transform=None, out=None, sep=';'):
'''
Calculation of Standard Errors (sqrt from diagonal of covariance matrix),
Wald Chi-Square (coefficient divided by SE and squared) and p-values for coefficicents
of given model on given data
Parameters
-----------
data: data for statistics calculation
model: model with coefficients, that will be used to calculate statistics
features: features to be used for statistics calculation (in case model
was not fitted using its own selected features - model.selected)
woe_transform: a woe object with binning information to perform WoE-transformation
out: a path for csv/xlsx output file to export
sep: the separator to be used in case of csv export
Returns
-----------
a dataframe with standard errors, wald statistics and p-values for feature coefficients
'''
if features is not None:
features_initial=features.copy()
else:
features_initial=self.features.copy()
if model is None:
model_to_check=self.model
else:
model_to_check=model
features_to_check=[]
coefs_list=[model_to_check.intercept_[0]]
for i in range(len(features_initial)):
if model_to_check.coef_[0][i]!=0:
features_to_check.append(features_initial[i])
coefs_list.append(model_to_check.coef_[0][i])
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
# Calculate matrix of predicted class probabilities.
# Check resLogit.classes_ to make sure that sklearn ordered your classes as expected
predProbs = np.matrix(model_to_check.predict_proba(data.dataframe[features_initial]))
# Design matrix -- add column of 1's at the beginning of your X_train matrix
X_design = np.hstack((np.ones(shape = (data.dataframe[features_to_check].shape[0],1)),
data.dataframe[features_to_check]))
# Initiate matrix of 0's, fill diagonal with each predicted observation's variance
#not enough memory for big df
#V = np.matrix(np.zeros(shape = (X_design.shape[0], X_design.shape[0])))
#np.fill_diagonal(V, np.multiply(predProbs[:,0], predProbs[:,1]).A1)
if data.weights is not None:
V=np.multiply(np.matrix(data.dataframe[data.weights]).T, np.multiply(predProbs[:,0], predProbs[:,1])).A1
else:
V=np.multiply(predProbs[:,0], predProbs[:,1]).A1
# Covariance matrix
covLogit = np.linalg.inv(np.matrix(X_design.T * V) * X_design)
# Output
bse=np.sqrt(np.diag(covLogit))
wald=(coefs_list / bse) ** 2
pvalue=chi2.sf(wald, 1)
features_test=pd.DataFrame({'feature':['intercept']+[x for x in features_initial],
'coefficient':model_to_check.intercept_.tolist()+model_to_check.coef_[0].tolist()}).merge(pd.DataFrame({'feature':['intercept']+[x for x in features_to_check], 'se':bse,
'wald':wald,
'p-value':pvalue}),
on='feature',
how='left')
if out is not None:
if out[-4:]=='.csv':
features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']].to_csv(out, sep = sep, index=False)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']].to_excel(out, sheet_name='Missing', index=False)
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
return features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']]
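    # Hedged summary of the statistics computed above (standard Wald test for logistic regression):
    #   V      = diag(p_i * (1 - p_i))            (times the sample weights, when provided)
    #   SE_j   = sqrt( [(X^T V X)^{-1}]_{jj} )    (X includes the intercept column)
    #   Wald_j = (beta_j / SE_j)^2, with the p-value taken from a chi-square distribution with 1 df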
def regularized_feature_selection(self, data, regularization=None, regularization_value=None, features=None, solver = None,
features_to_leave = None, scoring = 'roc_auc', threshold = .05):
'''
Feature selection based on regularization - model uses all available features, then the features with
positive or insignificant coefficients are excluded (l1 regularization use is advised for more preliminary exclusion)
Parameters
-----------
data: data for modeling (type Data)
regularization: 'l1' for LASSO (less features to stay) or 'l2' for ridge (smaller coefficients) regression
regularization_value: effect of regularization will be more prominent for lesser regularization value
features: list of features to use for feature selection (if empty, then all features from data will be used)
features_to_leave: features that must be included in the model
scoring: type of score used to estimate the model quality
threshold: threshold for p-value when removing a feature
Returns
-----------
score for the model built on selected features and
list of selected features
'''
if features_to_leave is None:
features_to_leave=[]
if features is None:
features_to_check=data.features.copy()
else:
features_to_check=features.copy()
if regularization is None:
regularization=self.regularization
if regularization_value is None:
regularization_value=self.regularization_value
if solver is None:
solver = self.solver
# correctness check
for feature in features_to_leave:
if feature not in data.features:
print ('Feature is not available:', feature)
return None
if data.weights is None:
lr=LogisticRegression(solver=solver, penalty=regularization, C=regularization_value)
else:
lr=LogisticRegression(solver='sag', penalty=regularization, C=regularization_value)
if data.ginis is None or data.ginis == {}:
ginis=data.calc_gini()
else:
ginis=data.ginis
scores=[]
to_refit=True
while to_refit:
to_refit=False
if data.weights == None:
lr.fit(data.dataframe[features_to_check], data.dataframe[data.target])
else:
lr.fit(data.dataframe[features_to_check], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
new_score = self.get_cv_score(Data(data.dataframe, target = data.target, features = features_to_check,
weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
positive_to_exclude=[x for x in np.asarray(features_to_check)[lr.coef_[0]>0] if x not in features_to_leave]
if len(positive_to_exclude)>0:
to_refit=True
features_to_exclude={x:ginis[x] for x in positive_to_exclude}
to_exclude=min(features_to_exclude, key=features_to_exclude.get)
print('Dropping ', to_exclude, 'with positive coefficient and gini =', ginis[to_exclude])
features_to_check.remove(to_exclude)
else:
wald=self.wald_test(data, model=lr, features=features_to_check)
feature_to_exclude_array=wald[(wald['p-value']>threshold) & (wald['p-value']==wald['p-value'].max()) & (wald['feature'].isin(features_to_leave+['intercept'])==False)]['feature'].values
if len(feature_to_exclude_array)>0:
to_refit=True
print('Dropping ', feature_to_exclude_array[0], 'with p-value =', wald[wald['feature']==feature_to_exclude_array[0]]['p-value'].values[0], 'and gini =', ginis[feature_to_exclude_array[0]])
features_to_check.remove(feature_to_exclude_array[0])
result_features=[]
for i in range(len(lr.coef_[0])):
if lr.coef_[0][i]==0:
print('Dropping ', features_to_check[i], 'with zero coefficient (gini =', ginis[features_to_check[i]], ')')
else:
result_features.append(features_to_check[i])
plt.plot(np.arange(len(scores)), scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(scores)), ['step ' + str(i) for i in np.arange(len(scores))], rotation = 'vertical')
plt.ylabel(scoring)
plt.title('Score changes')
plt.show()
self.selected = result_features
return new_score, self.selected
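    # Illustrative call (argument values below are made up, not defaults from this module):
    #   score, kept_features = model.regularized_feature_selection(train, regularization='l1',
    #                                                              regularization_value=0.1,
    #                                                              features_to_leave=['important_woe'])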
def stepwise_feature_selection(self, data, kind = 'mixed', features=None, features_initial=None, features_to_leave = None,
eps = .0005, scoring = 'roc_auc', forward_threshold = .05, backward_threshold = .05,
regularization=None, regularization_value=None):
'''
Stepwise feature selection can be of 3 types: forward, backward, mixed.
Forward: on each step the feature is selected that increases the score most while the score changes
are greater than epsilon.
Backward: starts from all the possible features, on each step removes the feature with the least score
decrease while the score changes are greater than epsilon (epsilon should be set to small negative value).
        Mixed: each step contains 2 stages. Stage 1: the algorithm selects, from the candidate features, a significant
        feature that increases the score most. Stage 2: from the features already in the model, it removes the feature with the
        least significance for the model.
Parameters
-----------
data: data for modeling (type Data)
kind: type of the algorithm, can be 'forward', 'backward' or 'mixed'
features: list of features from which selection is working (if None, then data.features are used)
features_initial: starting feature set for feature selection
features_to_leave: features that must be included in the model
eps: minimum significant score difference
scoring: type of score used to estimate the model quality
forward_threshold: threshold for p-value when adding a feature
backward_threshold: threshold for p-value when removing a feature
regularization: type of regularization to be used for wald test (l1 or l2)
regularization_value: value of regularization parameter to be used for wald test
Returns
-----------
score for the model built on selected features
list of selected features
'''
if features_to_leave is None:
features_to_leave=[]
to_leave = features_to_leave.copy()
#final_features = []
candidates = []
if features is None:
features=data.features.copy()
best_scores = []
features_change = []
# correctness check
for feature in features_to_leave:
if feature not in data.features:
                print ('Feature is not available:', feature)
return None
if features_initial is not None:
if feature not in features_initial:
print ('No', feature, 'in initial feature list provided! ')
return None
if regularization is None:
regularization=self.regularization
if regularization_value is None:
regularization_value=self.regularization_value
# Forward selection
if kind == 'forward':
print ('Forward feature selection started')
if features_initial is None:
features_initial=to_leave
for feature in features:
if feature not in features_initial:
candidates.append(feature)
features=features_initial.copy()
if len(features)>0:
prev_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = features, weights = data.weights),
scoring = scoring, selected_features=False)
best_scores.append(prev_score)
features_change.append('initial')
print('Initial features:', features, '', scoring, 'score', prev_score)
else:
prev_score=-1000
# maximum number of steps equals to the number of candidates
for i in range(len(candidates)):
# cross-validation scores for each of the remaining candidates of the step
cvs = {}
for feature in candidates:
tmp_features = features.copy()
# the feature is included in the model and the quality of the new model is calculated
tmp_features.append(feature)
try:
score = self.get_cv_score(Data(data.dataframe, target = data.target, features = tmp_features,
weights = data.weights),
scoring = scoring, selected_features=False)
                    except Exception:
                        # skip candidates for which the score cannot be calculated
                        continue
                    cvs[feature] = score
# looking for the best new feature
for f, s in cvs.items():
# warning: the metric is maximized
if s == max(cvs.values()):
# if the difference between the old and the new scores is greater than eps, the feature is added and the next step follows
if s - prev_score > eps:
print ('To add:', f, '', scoring, 'score:', cvs[f])
prev_score = s
features.append(f)
candidates.remove(f)
best_scores.append(s)
features_change.append(f)
                        # if the difference between the old and the new scores is smaller than eps, the score dynamics are plotted and exit follows
else:
self.features = tmp_features
plt.plot(np.arange(len(features_change)), best_scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(features_change)), features_change, rotation = 'vertical')
plt.xlabel('Feature addition')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
return prev_score, features
# if no features added
print('No features are available to add')
self.features = features
return prev_score, features
# Backward selection
elif kind == 'backward':
if features_initial is not None:
features=features_initial.copy()
for feature in features:
if feature not in to_leave:
candidates.append(feature)
print ('Backward selection started')
if len(features)>0:
prev_score = self.get_cv_score(Data(data.dataframe, target = data.target, features = features,
weights = data.weights), scoring = scoring, selected_features=False)
best_scores.append(prev_score)
features_change.append('initial')
print('Initial features:', features, '', scoring, 'score', prev_score)
else:
prev_score=-1000
#print('prev_score', prev_score, 'features', features, 'candidates', candidates)
# maximum number of steps equals to the number of candidates
for i in range(len(candidates)):
cvs = {}
if len(features)>1 and len(candidates)>0:
for feature in candidates:
tmp_features = features.copy()
# feature is removed and the cross-validation score is calculated
tmp_features.remove(feature)
cvs[feature] = self.get_cv_score(Data(data.dataframe, target = data.target, features = tmp_features,
weights = data.weights), scoring = scoring, selected_features=False)
else:
print('No features are available to exclude (at least 1 feature should remain)')
# searching for the feature that increases the quality most
features_=features.copy()
for f, s in cvs.items():
# if the difference between the old and the new scores is greater than eps, the feature is removed and the next step follows
if s == max(cvs.values()):
if s - prev_score > eps:
print ('To drop:', f, '', scoring, 'score:', cvs[f])
prev_score = s
candidates.remove(f)
features.remove(f)
best_scores.append(s)
features_change.append(f)
# if the quality increase is less than eps, exit
if features==features_ or len(candidates)==0:
if len(features)>1 and len(candidates):
                        print('Excluding any remaining feature causes too significant a score decrease')
self.features = candidates + to_leave
plt.plot(np.arange(len(features_change)), best_scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(features_change)), features_change, rotation = 'vertical')
plt.xlabel('Features removed')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
return prev_score, self.features
# if no feature was removed
return prev_score, features
# Mixed
elif kind == 'mixed':
print ('Mixed selection started')
if features_initial is None:
features_initial=to_leave
for feature in features:
if feature not in to_leave:
candidates.append(feature)
if data.weights is None:
lr=LogisticRegression(solver='saga', penalty=regularization, C=regularization_value)
else:
lr=LogisticRegression(solver='sag', penalty=regularization, C=regularization_value)
prev_score = -1000
result_features = features_initial.copy()
scores = []
feature_sets = []
if len(result_features)>0:
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
feature_sets.append(set(result_features))
else:
new_score = 0
to_continue=True
while to_continue and len(candidates)> 0:
to_continue=False
prev_score = new_score
pvalues = {}
cvs = {}
for candidate in [x for x in candidates if (x in result_features)==False]:
# new feature addition and the model quality estimation
                    if data.weights is None:
lr.fit(data.dataframe[result_features + [candidate]], data.dataframe[data.target])
else:
lr.fit(data.dataframe[result_features + [candidate]], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features + [candidate], weights = data.weights),
scoring = scoring, selected_features=False)
wald=self.wald_test(data, model=lr, features=result_features + [candidate])
pvalues[candidate] = wald[wald['feature']==candidate]['p-value'].values[0]
cvs[candidate] = new_score
# searching for a significant feature that gives the greatest score increase
result_features_=result_features.copy()
for feature in sorted(cvs, key = cvs.get, reverse = True):
if pvalues[feature] < forward_threshold and feature != 'intercept':
print ('To add:', feature, '', scoring, 'score:', cvs[feature], ' p-value', pvalues[feature])
result_features.append(feature)
break
if result_features==result_features_:
print('No significant features to add were found')
else:
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
elif cvs[feature]-prev_score>eps:
to_continue=True
scores.append(cvs[feature])
feature_sets.append(set(result_features))
#print('result_features', result_features)
# the least significant feature is removed
# if it is Step1 then no removal
if len(result_features)>1:
                    if data.weights is None:
lr.fit(data.dataframe[result_features], data.dataframe[data.target])
else:
lr.fit(data.dataframe[result_features], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
wald=self.wald_test(data, model=lr, features=result_features)
wald_to_check=wald[wald['feature'].isin(to_leave+['intercept'])==False]
#display(wald_to_check)
if max(wald_to_check['p-value']) > backward_threshold:
to_delete = wald_to_check[wald_to_check['p-value']==wald_to_check['p-value'].max()]['feature'].values[0]
"""if feature == to_delete:
candidates.remove(feature)
prev_score = prev_score-eps-0.05"""
result_features.remove(to_delete)
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
print ('To drop:', to_delete, '', scoring, 'score', new_score, 'p-value', wald_to_check[wald_to_check['feature']==to_delete]['p-value'].values[0])
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
else:
to_continue=True
scores.append(new_score)
feature_sets.append(set(result_features))
elif wald_to_check[wald_to_check['coefficient']==0].shape[0] > 0:
to_delete = wald_to_check[wald_to_check['coefficient']==0]['feature'].tolist()
"""if feature == to_delete:
candidates.remove(feature)
prev_score = prev_score-eps-0.05"""
print ('To drop:', to_delete, ' with zero coefficients (no score changes)')
result_features=[x for x in result_features if x not in to_delete]
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
else:
to_continue=True
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
feature_sets.append(set(result_features))
plt.plot(np.arange(len(scores)), scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(scores)), ['step ' + str(i) for i in np.arange(len(scores))], rotation = 'vertical')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
self.selected = sorted(list(feature_sets[-1]))
return new_score, self.selected
else:
print ('Incorrect kind of selection. Please use backward, forward or mixed. Good luck.')
return None
#edited 22.08.2018 by <NAME> - selected_features=True
def fit(self, data, selected_features = True):
'''
Fits the model to the data given on the selected features or on all.
Parameters
-----------
data: data (type Data) for fitting
selected_features: whether to fit on the features selected previously or not,
True - use selected features, False - use all features
'''
self.coefs = {}
if selected_features:
print('Using selected features: '+str(self.selected))
self.features = self.selected
else:
print('Using all available features: '+str(data.features))
self.features = data.features
        if self.features is None:
print ('No features, how can that happen? :(')
return None
try:
if data.weights is None:
self.model.fit(data.dataframe[self.features], data.dataframe[data.target])
else:
self.model.fit(data.dataframe[self.features], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
except Exception:
            print('Fit failed! Maybe there are missing values in the data?')
return None
for i in range(len(self.features)):
self.coefs[self.features[i]] = self.model.coef_[0][i]
def final_exclude(self, input_data, excluded=None, apply_changes=False):
'''
        Checks the effect of excluding one feature at a time (after excluding all features from the 'excluded' list) for each of
        the available features and prints the initial gini values and the difference after each single-feature exclusion. Once the
        exclusion list is decided, this method can be used to exclude the chosen features and refit the current model on the rest of them.
Parameters
-----------
input_data: A Data or DataSamples object for fitting and gini calculation
excluded: a list of features to exclude before exclusion cycle
apply_changes: if True then all features from 'excluded' list will be excluded and the model will be fitted
using the rest of the features
'''
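        # The returned table lists, for each candidate feature, the change in gini
        # (relative to the 'initial' row) after dropping that single feature, e.g. (hypothetical objects):
        #   ginis_df = model.final_exclude(ds)                                    # inspect effects only
        #   model.final_exclude(ds, excluded=['f1_WOE'], apply_changes=True)      # drop and refit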
if len(self.selected)==0:
print('No selected features to try exclusion. Abort.')
return
if excluded is None:
excluded=[]
if type(input_data)==DataSamples:
retrain_data=input_data.train
else:
retrain_data=input_data
new_selected=[x for x in self.selected if x not in excluded]
if apply_changes:
self.selected=new_selected
self.fit(retrain_data, selected_features=True)
self.draw_coefs()
return self.roc_curve(input_data, figsize=(10, 7))
else:
try_model=LogisticRegressionModel(random_state = 42, penalty = self.regularization, C = self.regularization_value, solver = self.solver)
try_model.selected=new_selected
try_model.fit(retrain_data, selected_features=True)
#try_model.draw_coefs()
ginis_excl={}
ginis_excl['initial']=try_model.roc_curve(input_data, verbose=False)
for excl in new_selected:
#try_model = LogisticRegressionModel(random_state = 42, penalty = self.regularization, C = self.regularization_value, solver = self.solver)
try_model.selected=[x for x in new_selected if x!=excl]
try_model.fit(retrain_data, selected_features=True)
#try_model.draw_coefs()
new_ginis=try_model.roc_curve(input_data, verbose=False)
ginis_excl[excl]=[new_ginis[0]-ginis_excl['initial'][0], new_ginis[1]-ginis_excl['initial'][1], new_ginis[2]-ginis_excl['initial'][2]]
ginis_excl_df=pd.DataFrame(ginis_excl).T
if type(input_data)==DataSamples:
cols=['Train']
if input_data.validate is not None:
cols.append('Validate')
if input_data.test is not None:
cols.append('Test')
ginis_excl_df.columns=cols
ginis_excl_df.sort_values('Test' if 'Test' in cols else 'Validate' if 'Validate' in cols else 'Train', ascending=False, inplace=True)
else:
ginis_excl_df.columns=['Data']
ginis_excl_df.sort_values('Data', ascending=False, inplace=True)
return ginis_excl_df
def bootstrap_gini(self, bs_base, samples, bootstrap_part=0.75, bootstrap_number=10, stratify=True, replace=True, seed=0,
woe_transform=None, crosses_transform=None, figsize=(15,10), bins=None):
'''
Calculates Gini in bootstrap samples (either provided or generated) and plots their distribution with
gini values from provided samples (Data, DataSamples or list of Data)
Parameters
-----------
        bs_base: a DataSamples object with bootstrap_base and bootstrap or a Data object to generate bootstrap samples from
samples: a DataSamples object with train/validate/test samples, a Data object or a list of Data objects to mark gini values on plot
bootstrap_part: the size of each bootstrap sample is defined as part of input data sample
bootstrap_number: number of generated bootstrap samples
        stratify: should bootstrapping be stratified by the data target
replace: is it acceptable to repeat rows from train dataframe for bootstrap samples
seed: value of random_state for dataframe.sample (each random_state is calculated as seed + number in bootstrap)
woe_transform: a WOE object to perform WoE-transformation before using model
bins: number of bins for the distribution plot (if None - use Freedman-Diaconis rule)
crosses_transform: a Crosses object to perform cross-transformation before using model
'''
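        # Gini below is computed as (2*AUC - 1)*100 for each provided sample and for every
        # bootstrap draw; the red vertical lines on the plot mark mean +/- 2*std of the
        # bootstrap gini distribution.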
if isinstance(bs_base, DataSamples):
if bs_base.bootstrap_base is None:
print('No bootstrap data provided in the input DataSamples object. Return none')
return None
else:
print('Using bootstrap data provided in the input DataSamples object..')
bootstrap=bs_base.bootstrap
bootstrap_base=bs_base.bootstrap_base
elif isinstance(bs_base, Data):
print('Generating bootstrap data from the input Data object..')
DS_gini=DataSamples()
DS_gini.bootstrap_split(bs_base, bootstrap_part=bootstrap_part, bootstrap_number=bootstrap_number, stratify=stratify, replace=replace, seed=seed)
bootstrap=DS_gini.bootstrap
bootstrap_base=DS_gini.bootstrap_base
else:
print('No bootstrap data was provided in the input. Return none')
return None
if isinstance(samples, DataSamples):
check_samples=[]
for sample in [samples.train, samples.validate, samples.test]:
if sample is not None:
check_samples.append(sample)
elif isinstance(samples, list):
check_samples=samples.copy()
elif isinstance(samples, Data):
check_samples=[samples]
else:
print('No samples data was provided in the input')
check_samples=[]
samples_gini={}
for i in range(len(check_samples)):
if check_samples[i].name is None:
current_sample=str(i)
else:
current_sample=check_samples[i].name
print('Calculating gini for', current_sample,'sample..')
if self.selected!=[x for x in self.selected if x in check_samples[i].dataframe]:
print('Not all features from the current model were found in the',current_sample,'sample..')
if woe_transform is None and crosses_transform is None:
                    print('No WOE or Crosses object was found. Returning None.')
return None
else:
if woe_transform is not None:
print('Starting woe-transformation..')
to_calc_gini=woe_transform.transform(check_samples[i],
features=[x[:-4] for x in self.selected if x[:-4] in woe_transform.feature_woes],
keep_essential=False if crosses_transform is not None else True, calc_gini=False)
if crosses_transform is not None:
print('Starting crosses-transformation..')
to_calc_gini=crosses_transform.transform(to_calc_gini if woe_transform is not None else check_samples[i],
keep_essential=True, calc_gini=False)
else:
to_calc_gini=Data(check_samples[i].dataframe[self.selected+[check_samples[i].target]], check_samples[i].target, features=self.selected)
preds = self.predict(to_calc_gini)[:,1]
fpr, tpr, _ = roc_curve(to_calc_gini.dataframe[to_calc_gini.target], preds)
samples_gini[current_sample] = (2*auc(fpr, tpr)-1)*100
if self.selected!=[x for x in self.selected if x in bootstrap_base.dataframe]:
print('Not all features from the current model were found in the bootstrap data..')
if woe_transform is None and crosses_transform is None:
                print('No WOE or Crosses object was found. Returning None.')
return None
else:
if woe_transform is not None:
print('Starting woe-transformation..')
bootstrap_base=woe_transform.transform(bootstrap_base,
features=[x[:-4] for x in self.selected if x[:-4] in woe_transform.feature_woes],
keep_essential=False if crosses_transform is not None else True, calc_gini=False)
if crosses_transform is not None:
print('Starting crosses-transformation..')
bootstrap_base=crosses_transform.transform(bootstrap_base, keep_essential=True, calc_gini=False)
#bootstrap_base=woe_transform.transform(bootstrap_base, features=[x[:-4] for x in self.selected], keep_essential=True, calc_gini=False)
bootstrap_gini=[]
print('Calculating gini for bootstrap samples..')
for i in range(len(bootstrap)):
preds = self.predict(Data(bootstrap_base.dataframe.iloc[bootstrap[i]][self.selected], bootstrap_base.target, features=self.selected))[:,1]
fpr, tpr, _ = roc_curve(bootstrap_base.dataframe.iloc[bootstrap[i]][bootstrap_base.target], preds)
bootstrap_gini.append((2*auc(fpr, tpr)-1)*100)
plt.figure(figsize=figsize)
sns.distplot(bootstrap_gini, bins=bins)
palette = itertools.cycle(sns.color_palette())
for s in samples_gini:
plt.axvline(x=samples_gini[s], linestyle='--', color=next(palette), label=s)
plt.axvline(x=np.mean(bootstrap_gini)-2*np.std(bootstrap_gini), linestyle='-', color='red', alpha=0.5)
plt.text(np.mean(bootstrap_gini)-2*np.std(bootstrap_gini), 0, ' mean-2*std = '+str(round(np.mean(bootstrap_gini)-2*np.std(bootstrap_gini),4)),
horizontalalignment='right', verticalalignment='bottom', rotation=90, fontsize=12)
plt.axvline(x=np.mean(bootstrap_gini)+2*np.std(bootstrap_gini), linestyle='-', color='red', alpha=0.5)
plt.text(np.mean(bootstrap_gini)+2*np.std(bootstrap_gini), 0, ' mean+2*std = '+str(round(np.mean(bootstrap_gini)+2*np.std(bootstrap_gini),4)),
horizontalalignment='right', verticalalignment='bottom', rotation=90, fontsize=12)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.legend()
#plt.title(feature.feature, fontsize = 16)
#if out:
# plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
return samples_gini, bootstrap_gini
def drop_features(self, to_drop = None):
'''
deletes features from the model
Parameters
-----------
to_drop: a feature or a list of features that should be excluded
'''
if to_drop is None:
            print ('Please enter the features you want to exclude. Use the to_drop parameter and restart this method.')
return None
elif isinstance(to_drop, list):
print ('The features will be removed from the "selected features" list.')
for feature in to_drop:
if feature in self.selected:
self.selected.remove(feature)
print (feature, 'removed')
else:
print ('The feature will be removed from the "selected features" list.')
if to_drop in self.selected:
                self.selected.remove(to_drop)
print (to_drop, 'removed')
#edited 22.08.2018 by <NAME> - selected_features=True
def get_cv_score(self, data, cv = 5, scoring = 'roc_auc', selected_features = True):
'''
Calculates the model quality with cross-validation
Parameters
-----------
data: data for cross-validation score calculation
cv: number of folds
scoring: metric of quality
selected_features: whether to use selected features or not, True - use selected features, False - use all features
Returns
-----------
cross-validation score
'''
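        # e.g. get_cv_score(train, cv=5, scoring='roc_auc') averages the ROC AUC over 5 folds
        # (illustrative; 'train' is a hypothetical Data object)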
if selected_features:
features = self.selected
else:
features = data.features
        if features is None:
            print ('No features, how can that happen? :(')
            return None
        if data.weights is None:
return cross_val_score(self.model, data.dataframe[features],
data.dataframe[data.target], cv = cv, scoring = scoring).mean()
else:
return cross_val_score(self.model, data.dataframe[features], data.dataframe[data.target], cv = cv,
scoring = scoring, fit_params = {'sample_weight' : data.dataframe[data.weights]}).mean()
def form_scorecard(self, woe=None, crosses=None, out = None, sep=';', score_value=444, score_odds=10, double_odds=69):
'''
Makes a scorecard and exports it to a file.
Parameters
-----------
woe: a WOE object for scoring card
crosses: a Crosses object for scoring card
out: file to export the scorecard in csv/xlsx format
sep: the separator to be used in case of csv export
score_value: score value, used for scaling
score_odds: odds of score value, used for scaling
        double_odds: score value increment that halves the odds, used for scaling
Returns:
----------
A scorecard (pandas.DataFrame)
'''
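        # Scaling sketch (standard points-to-double-odds logic used below, shown with the default
        # calibration parameters, purely illustrative):
        #   factor = double_odds / ln(2) = 69 / 0.6931 ~ 99.5
        #   attribute score  = -(shifted WOE * coefficient * factor)
        #   intercept score  = -(intercept + ln(score_odds)) * factor + score_value + correction for the WOE shifts
        # so a total score of score_value (444) corresponds to odds of score_odds (10), and adding
        # double_odds (69) points halves the odds.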
        # if no WOE is used then only regression coefficients are included
if woe is None and crosses is None:
            print ('Attention: no WOE or Crosses object provided')
scorecard = pd.DataFrame(columns = ['feature', 'coefficient'])
for feature in self.features:
tmp = pd.DataFrame([[feature, self.coefs[feature]]], columns = ['feature', 'coefficient'])
scorecard = scorecard.append(tmp, ignore_index=True)
scorecard = scorecard.append(pd.DataFrame([['intercept', self.model.intercept_[0]]], columns = ['feature',
'coefficient']),
ignore_index=True)
#scorecard.to_csv(fname, sep = ';')
#return scorecard
else:
scorecard = pd.DataFrame(columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe', 'coefficient',
'sample_part', 'ER'])
for feature in self.features:
if woe is not None and feature[:-4] in woe.feature_woes:
woes = woe.feature_woes[feature[:-4]].woes
missing_group=woe.feature_woes[feature[:-4]].missing_group
groups = woe.feature_woes[feature[:-4]].groups
categorical=woe.feature_woes[feature[:-4]].categorical
d=woe.feature_woes[feature[:-4]].data
if d.weights is None:
all_obs=d.dataframe.shape[0]
else:
all_obs=d.dataframe[d.weights].sum()
# searching for WOE for each interval of values
for group in [x for x in woes if woes[x] is not None]:
if d.weights is None:
obs=d.dataframe[d.dataframe[feature]==woes[group]].shape[0]
bad=d.dataframe[d.dataframe[feature]==woes[group]][d.target].sum()
else:
obs=d.dataframe[d.dataframe[feature]==woes[group]][d.weights].sum()
bad=d.dataframe[(d.dataframe[feature]==woes[group]) & (d.dataframe[d.target]==1)][d.weights].sum()
missing_in=(group==missing_group)*1
tmp = pd.DataFrame([[feature[:-4], categorical, group, groups[group], missing_in, woes[group], self.coefs[feature],
obs/all_obs, bad/obs]],
columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe', 'coefficient',
'sample_part', 'ER'])
scorecard = scorecard.append(tmp, ignore_index=True)
elif crosses is not None and int(feature[len(crosses.prefix):-4]) in crosses.decision_trees:
tree = crosses.decision_trees[int(feature[len(crosses.prefix):-4])].tree.dropna(how='all', axis=1)
leaves = tree[tree['leaf']]
for group in sorted(leaves['group'].unique().tolist()):
current_group=leaves[leaves['group']==group]
used_features=list(leaves.columns[:leaves.columns.get_loc('node')])
current_woe=current_group['group_woe'].unique()[0]
current_er=current_group['group_target'].unique()[0]/current_group['group_amount'].unique()[0]
current_sample_part=current_group['group_amount'].unique()[0]/leaves[['group', 'group_amount']].drop_duplicates()['group_amount'].sum()
current_values=[]
for _, row in current_group.iterrows():
used_features=[]
parent_node=row['parent_node']
while parent_node is not None:
used_features=[tree[tree['node']==parent_node]['split_feature'].values[0]]+used_features
parent_node=tree[tree['node']==parent_node]['parent_node'].values[0]
current_values.append({x:row[x] for x in used_features})
#current_values=[{x:row[x] for x in used_features if row[x] is not None} for _, row in current_group.iterrows()]
scorecard = scorecard.append({'feature':feature[:-4], 'categorical':np.nan, 'group':group,
'values': current_values, 'missing':0, 'woe':current_woe,
'coefficient':self.coefs[feature], 'sample_part':current_sample_part,
'ER':current_er}
, ignore_index=True)
else:
                print ('Attention: no binning information found for feature', feature, '. Skipping')
scorecard = scorecard.sort_values(by = ['feature', 'group'])
# bias addition
scorecard_intercept = pd.DataFrame([['intercept', np.nan, np.nan, np.nan, np.nan, np.nan, self.model.intercept_[0], np.nan, np.nan]],
columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe',
'coefficient', 'sample_part', 'ER'])
multiplier=double_odds/np.log(2)
if double_odds>0:
scorecard=scorecard.merge(scorecard[['feature', 'woe']].groupby('feature', as_index=False).min().rename(index=str, columns={"woe": "woe_shift"}), on='feature',how='left')
else:
scorecard=scorecard.merge(scorecard[['feature', 'woe']].groupby('feature', as_index=False).max().rename(index=str, columns={"woe": "woe_shift"}), on='feature',how='left')
scorecard['woe_shifted']=scorecard['woe']-scorecard['woe_shift']
scorecard['score']=-(scorecard['woe_shifted']*scorecard['coefficient']*multiplier)
for_intercept=scorecard[['coefficient', 'woe_shift']].drop_duplicates().copy()
for_intercept['woe_on_coef']=-for_intercept['coefficient']*for_intercept['woe_shift']*multiplier
scorecard_intercept['score']=-((scorecard_intercept['coefficient']+np.log(score_odds))*multiplier)+score_value+for_intercept['woe_on_coef'].sum()
scorecard_intercept.index=[-1]
scorecard=scorecard.append(scorecard_intercept).sort_index().reset_index(drop=True)[['feature', 'categorical', 'group',
'values', 'missing', 'woe',
'coefficient', 'score',
'sample_part', 'ER']]
#display(scorecard)
scorecard['score']=round(scorecard['score']).astype('int64')
scorecard['values']=scorecard['values'].astype(str)
# export to a file
if out is not None:
if out[-4:]=='.csv':
scorecard.to_csv(out, sep = sep, index=False)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
scorecard.to_excel(out, sheet_name='Missing', index=False)
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
self.scorecard = scorecard
return scorecard
#edited 28.08.2018 by <NAME>
def score(self, data, features_to_leave=None, include_scores_in_features=False, unknown_score=0, verbose=True):
'''
Performs data scoring
Parameters
-----------
data: data of type Data
features_to_leave: list of fields to include in output dataframe
include_scores_in_features: should all scores be treated as features in output Data object (otherwise new features will be empty)
Returns
-----------
Data object, containing dataframe with initial features (+ features_to_leave), their scores and overall score
'''
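        # The resulting 'score' column is the sum of the per-feature '<feature>_scr' columns
        # plus the intercept score taken from the scorecard (see the end of this method).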
if verbose:
print ('Scores calculation...')
if self.scorecard is None or self.scorecard.shape[0] == 0:
print ('No scorecard! Where is it?')
return None
if 'score' not in self.scorecard.columns:
print ('Please set scores: scorecard[score]')
return None
scorecard=self.scorecard.copy()
scorecard['values']=scorecard['values'].astype(str)
features_to_leave=[] if features_to_leave is None else features_to_leave.copy()
features_to_leave+=([data.target] if data.target is not None else [])+([data.weights] if data.weights is not None else [])
features_to_leave=list(set(features_to_leave))
trees_for_score=scorecard[scorecard.apply(lambda row: pd.isnull(row['categorical']) and row['feature']!='intercept', axis=1)]['feature'].unique().tolist()
features_for_score=[x for x in scorecard.feature.unique() if x!='intercept' and x not in trees_for_score]
all_features=features_for_score.copy()
scorecard.loc[scorecard.feature.isin(trees_for_score)==False, 'values']=\
scorecard.loc[scorecard.feature.isin(trees_for_score)==False, 'values'].apply(lambda x:
np.nan if x=='nan' else \
eval(x.replace('[nan]', '[np.nan]').replace('[nan,','[np.nan,').replace(', nan]',', np.nan]')\
.replace(', inf]',', np.inf]').replace('[-inf,','[-np.inf,')))
if len(trees_for_score)>0:
scorecard.loc[scorecard.feature.isin(trees_for_score), 'values']=\
scorecard.loc[scorecard.feature.isin(trees_for_score), 'values'].apply(lambda x:
eval(x.replace(': nan,',': np.nan,').replace(': nan}',': np.nan}')\
.replace('), nan)','), np.nan)').replace(', nan,',', np.nan,')\
.replace('[nan,','[np.nan,').replace(', nan]',', np.nan]').replace('[nan]', '[np.nan]')\
.replace(', inf)',', np.inf)').replace('(-inf,','(-np.inf,')))
all_features+=list(set([f for values in scorecard[scorecard.feature.isin(trees_for_score)]['values'] for node in values for f in node]))
all_features=list(set(all_features))
all_features=sorted(all_features)
try:
data_with_scores = data.dataframe[list(set(all_features+features_to_leave))].copy()
for feature in features_for_score:
if verbose:
print (feature)
bounds = list(scorecard[scorecard.feature == feature]['values'])
scores = list(scorecard[scorecard.feature == feature].score)
missing = list(scorecard[scorecard.feature == feature].missing)
categorical = list(scorecard[scorecard.feature == feature].categorical)[0]
if categorical==False:
bs = {}
missing_score=0
for i in range(len(bounds)):
if missing[i]==1:
missing_score=scores[i]
if isinstance(bounds[i],list):
bs[scores[i]]=bounds[i][0]
bs[np.inf]=np.inf
bs={x:bs[x] for x in sorted(bs, key=bs.get)}
data_with_scores[feature+'_scr']=data_with_scores[feature].apply(
lambda x: missing_score if pd.isnull(x) \
else list(bs.keys())[np.argmax([bs[list(bs.keys())[i]] <= x and bs[list(bs.keys())[i+1]] > x for i in range(len(bs.keys())-1)])])
else:
bs = {}
missing_score=0
for i in range(len(bounds)):
bs[scores[i]]=bounds[i]
for b in bs[scores[i]]:
if pd.isnull(b) or b=='':
missing_score=scores[i]
data_with_scores[feature+'_scr']=data_with_scores[feature].apply(
lambda x: missing_score if pd.isnull(x) \
else unknown_score if x not in [v for s in bs for v in bs[s]] \
else list(bs.keys())[[(x in bs[s]) for s in bs].index(True)])
for tree in trees_for_score:
if verbose:
print (tree)
current_tree=scorecard[scorecard.feature==tree]
conditions=[]
for group in current_tree.group:
current_conditions=current_tree[current_tree.group==group]['values'].values[0]
for condition in current_conditions:
final_condition={x:None for x in all_features}
final_condition.update(condition)
final_condition.update({'node':0, 'score':current_tree[current_tree.group==group]['score'].values[0]})
conditions.append(final_condition)
pseudo_tree=pd.DataFrame(conditions)[all_features+['node', 'score']].dropna(how='all', axis=1)
pseudo_tree['leaf']=True
#display(pseudo_tree)
data_with_scores[tree+'_scr']=DecisionTree().transform(data_with_scores, pseudo_tree, ret_values=['score'])
except Exception:
print('No raw data provided. Scoring by WoE values..')
features_for_score = [x for x in scorecard.feature.unique() if x!='intercept']
trees_for_score = []
data_with_scores = data.dataframe[list(set([x+'_WOE' for x in features_for_score]+features_to_leave))].copy()
for feature in features_for_score:
if verbose:
print (feature+'_WOE')
woes = list(scorecard[scorecard.feature == feature]['woe'])
scores = list(scorecard[scorecard.feature == feature].score)
ws = {}
for i in range(len(woes)):
ws[round(woes[i],5)]=scores[i]
data_with_scores[feature+'_scr']=data_with_scores[feature+'_WOE'].apply(
lambda x: unknown_score if round(x,5) not in ws \
else ws[round(x,5)])
data_with_scores['score']=data_with_scores[[x+'_scr' for x in features_for_score+trees_for_score]].sum(axis=1)+\
scorecard[scorecard.feature.str.lower() == 'intercept']['score'].values[0]
if include_scores_in_features:
return Data(data_with_scores, data.target, features=['score']+[x+'_scr' for x in features_for_score+trees_for_score], weights=data.weights, name=data.name)
else:
return Data(data_with_scores, data.target, weights=data.weights, name=data.name)
def score_distribution(self, base, samples=None, bins=20, figsize=(15,10), width=0.2, proportion=False, draw_base=False):
'''
Calculates score bins on base sample and draws samples distribution by these bins
Parameters
-----------
        base: a Data object used to calculate score bins (if there is no 'score' field in base.dataframe, then score calculation is performed)
        samples: a Data/DataSamples object or a list of Data objects to draw the distribution by base-defined bins
        bins: number of bins to generate
        figsize: a tuple for graph size
        width: width of bars for graph (width*number of samples to draw should be less than 1)
proportion: True, if proportions should be drawn (if False, then amounts are drawn)
draw_base: if True, then base sample is also drawn
'''
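        # Bin edges are derived from the base sample scores via pd.cut (retbins=True) and the
        # same edges are then applied to every sample passed in 'samples'.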
if self.scorecard.shape[0]==0:
print('No scorecard detected. Please, run form_scorecard method. Return None')
return None
if 'score' not in base.dataframe:
print('Scoring base sample..')
to_cut=self.score(base, verbose=False).dataframe['score']
else:
to_cut=base.dataframe['score']
_, cuts= | pd.cut(to_cut, bins=bins, right=False, precision=10, retbins=True) | pandas.cut |
import requests
import pandas as pd
from bs4 import BeautifulSoup
import timeit
import numpy as np
import threading
# pd.set_option("display.max_rows", None, "display.max_columns", None)
start = timeit.default_timer()
finaldfarr = []
gameLinks = []
listOfYears = ["https://pepperdinewaves.com/sports/womens-volleyball/schedule/2018"]
def URL(url):
theURL = requests.get(str(url))
htmlParser = BeautifulSoup(theURL.text,'html.parser')
return htmlParser
def findingSet(htmlParser, id):
Set = htmlParser.find(id=id)
return Set
def editingDataFrameOther(currentSet, Set, id, Opponent1, Opponent2):
if id == "set-1":
currentSet = pd.read_html(str(Set), header=0) [0]
currentSet = currentSet.loc[:,["Score", "Serve Team"]]
currentSet['Set Score'] = 00
return currentSet
lastItem = currentSet.at[currentSet.shape[0] - 1,'Score']
x = lastItem.split("-")
if int(x[0]) > int(x[-1]):
Opponent1 += 1
winner = str(Opponent1) + str(Opponent2)
else:
Opponent2 += 1
winner = str(Opponent1) + str(Opponent2)
currentSet2 = pd.read_html(str(Set), header=0) [0]
currentSet2 = currentSet2.loc[:,["Score", "Serve Team"]]
currentSet2['Set Score'] = int(winner)
currentSet = currentSet.append(currentSet2, ignore_index=True)
return [currentSet, Opponent1, Opponent2]
def gameWinner(currentSet):
lastItem = currentSet.at[currentSet.shape[0] - 1,'Set Score']
x = list(str(lastItem))
if int(x[0]) > int(x[-1]):
currentSet['Game Score'] = True
else:
currentSet['Game Score'] = False
finaldfarr.append(currentSet)
print("done with a game")
def cleanUp(currSet):
currSet.dropna(subset=['Score'], inplace=True)
currSet = currSet.reset_index(drop=True)
return currSet
def GatherLinks(tableWithSites, gameLinks):
for link in tableWithSites.find_all('a'):
everyLink = link.get('href')
        if "boxscore" in str(everyLink) and everyLink not in gameLinks:
gameLinks.append(everyLink)
def main():
for i in listOfYears:
theURL = requests.get(i)
soup = BeautifulSoup(theURL.text, 'html.parser')
tableWithSites = soup.find(id="schedule-view-default")
GatherLinks(tableWithSites, gameLinks)
# print(gameLinks)
for i in gameLinks:
Opponent1, Opponent2 = 0, 0
currLink = URL("https://pepperdinewaves.com" + str(i))
id = "set-1"
Set = findingSet(currLink, id)
if Set:
currSet = editingDataFrameOther(Set, Set, id, Opponent1, Opponent2)
currSet = cleanUp(currSet)
else:
continue
id = "set-2"
Set = findingSet(currLink, id)
data = editingDataFrameOther(currSet, Set, id, Opponent1, Opponent2)
Opponent1 = data[1]
Opponent2 = data[2]
currSet = data[0]
currSet = cleanUp(currSet)
id = "set-3"
Set = findingSet(currLink, id)
data = editingDataFrameOther(currSet, Set, id, Opponent1, Opponent2)
Opponent1 = data[1]
Opponent2 = data[2]
currSet = data[0]
currSet = cleanUp(currSet)
id = "set-4"
Set = findingSet(currLink, id)
if Set:
data = editingDataFrameOther(currSet, Set, id, Opponent1, Opponent2)
Opponent1 = data[1]
Opponent2 = data[2]
currSet = data[0]
currSet = cleanUp(currSet)
else:
gameWinner(currSet)
continue
id = "set-5"
Set = findingSet(currLink, id)
if Set:
data = editingDataFrameOther(currSet, Set, id, Opponent1, Opponent2)
Opponent1 = data[1]
Opponent2 = data[2]
currSet = data[0]
currSet = cleanUp(currSet)
else:
gameWinner(currSet)
continue
gameWinner(currSet)
if __name__=="__main__":
main()
finaldf = | pd.concat(finaldfarr) | pandas.concat |
#!/usr/bin/env python3
"""
UMLS REST API client
UTS = UMLS Technology Services
https://documentation.uts.nlm.nih.gov/rest/home.html
https://documentation.uts.nlm.nih.gov/rest/authentication.html
https://documentation.uts.nlm.nih.gov/rest/concept/
https://documentation.uts.nlm.nih.gov/rest/source-asserted-identifiers/
https://documentation.uts.nlm.nih.gov/rest/search/
https://www.nlm.nih.gov/research/umls/knowledge_sources/metathesaurus/release/abbreviations.html
TGT = Ticket Granting Ticket
(API requires one ticket per request.)
CUI = Concept Unique Identifier
Atom is a term in a source.
Term-to-concept is many to one.
Retrieves information for a known Semantic Type identifier (TUI)
/semantic-network/{version}/TUI/{id}
(DOES NOT SEARCH FOR INSTANCES OF THIS TYPE -- RETURNS METADATA ONLY.)
Example TUIs:
CHEM|Chemicals & Drugs|T116|Amino Acid, Peptide, or Protein
CHEM|Chemicals & Drugs|T195|Antibiotic
CHEM|Chemicals & Drugs|T123|Biologically Active Substance
CHEM|Chemicals & Drugs|T122|Biomedical or Dental Material
CHEM|Chemicals & Drugs|T103|Chemical
CHEM|Chemicals & Drugs|T120|Chemical Viewed Functionally
CHEM|Chemicals & Drugs|T104|Chemical Viewed Structurally
CHEM|Chemicals & Drugs|T200|Clinical Drug
CHEM|Chemicals & Drugs|T126|Enzyme
CHEM|Chemicals & Drugs|T125|Hormone
CHEM|Chemicals & Drugs|T129|Immunologic Factor
CHEM|Chemicals & Drugs|T130|Indicator, Reagent, or Diagnostic Aid
CHEM|Chemicals & Drugs|T114|Nucleic Acid, Nucleoside, or Nucleotide
CHEM|Chemicals & Drugs|T109|Organic Chemical
CHEM|Chemicals & Drugs|T121|Pharmacologic Substance
CHEM|Chemicals & Drugs|T192|Receptor
CHEM|Chemicals & Drugs|T127|Vitamin
DISO|Disorders|T020|Acquired Abnormality
DISO|Disorders|T190|Anatomical Abnormality
DISO|Disorders|T049|Cell or Molecular Dysfunction
DISO|Disorders|T019|Congenital Abnormality
DISO|Disorders|T047|Disease or Syndrome
DISO|Disorders|T050|Experimental Model of Disease
DISO|Disorders|T033|Finding
DISO|Disorders|T037|Injury or Poisoning
DISO|Disorders|T048|Mental or Behavioral Dysfunction
DISO|Disorders|T191|Neoplastic Process
DISO|Disorders|T046|Pathologic Function
DISO|Disorders|T184|Sign or Symptom
GENE|Genes & Molecular Sequences|T087|Amino Acid Sequence
GENE|Genes & Molecular Sequences|T088|Carbohydrate Sequence
GENE|Genes & Molecular Sequences|T028|Gene or Genome
GENE|Genes & Molecular Sequences|T085|Molecular Sequence
GENE|Genes & Molecular Sequences|T086|Nucleotide Sequence
https://github.com/HHS/uts-rest-api
https://utslogin.nlm.nih.gov
https://www.nlm.nih.gov/research/umls/knowledge_sources/metathesaurus/release/abbreviations.html
Some term types:
CE : Entry term for a Supplementary Concept
ET : Entry term
FN : Full form of descriptor
HG : High Level Group Term
HT : Hierarchical term
LLT : Lower Level Term
MH : Main heading
MTH_FN : MTH Full form of descriptor
MTH_HG : MTH High Level Group Term
MTH_HT : MTH Hierarchical term
MTH_LLT : MTH Lower Level Term
MTH_OS : MTH System-organ class
MTH_PT : Metathesaurus preferred term
MTH_SY : MTH Designated synonym
NM : Name of Supplementary Concept
OS : System-organ class
PCE : Preferred entry term for Supplementary Concept
PEP : Preferred entry term
PM : Machine permutation
PT : Designated preferred name
PTGB : British preferred term
SY : Designated synonym
SYGB : British synonym
Some relationships:
RB : has a broader relationship
RL : the relationship is similar or "alike".
RN : has a narrower relationship
RO : has relationship other than synonymous, narrower, or broader
RQ : related and possibly synonymous.
RU : Related, unspecified
SY : source asserted synonymy.
"""
###
import sys,os,re,yaml,json,urllib.parse,csv,logging,requests,time
import pandas as pd
from functools import total_ordering
#
from lxml import etree
from pyquery import PyQuery
#
from ..util import rest
#
###
API_HOST='uts-ws.nlm.nih.gov'
API_BASE_PATH="/rest"
API_BASE_URL=f"https://{API_HOST}{API_BASE_PATH}"
API_AUTH_SERVICE="http://umlsks.nlm.nih.gov"
API_AUTH_HOST="utslogin.nlm.nih.gov"
API_AUTH_ENDPOINT='/cas/v1/api-key'
API_VERSION="current"
API_HEADERS={"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent":"python" }
#
## elements common to 'Concept' and 'SourceAtomCluster' class
UMLS_COMMON_FIELDS=['classType','name','ui','atomCount','definitions','atoms','defaultPreferredAtom']
UMLS_OPTIONAL_FIELDS=['parents','children','relations','descendants']
#
#############################################################################
@total_ordering
class Atom:
def __init__(self, cui, src, code, name):
self.cui = cui
self.src = src
self.code = code
self.name = name
def __eq__(self, other):
return ((self.cui, self.src, self.code) == (other.cui, other.src, other.code))
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return ((self.cui, self.src, self.code) < (other.cui, other.src, other.code))
def __hash__(self):
return id(self)
#############################################################################
class SourceList:
def __init__(self):
self.sources=[];
def initFromFile(self, sfile):
fin = open(sfile)
if not fin:
logging.error(f'Could not open {sfile}')
return
csvReader = csv.reader(fin, delimiter='\t', quotechar='"')
        next(csvReader)  # skip header row
while True:
try:
                row = next(csvReader)
except:
break
self.sources.append(tuple(row))
self.sources.sort()
fin.close()
def initFromApi(self, base_url, ver, auth):
url = (f'{base_url}/metadata/{ver}/sources')
tgt = auth.gettgt()
response = UmlsApiGet(url, auth, tgt)
response.encoding = 'utf-8'
items = json.loads(response.text)
logging.debug(json.dumps(items, indent=4))
sources = items["result"]
tags = ["abbreviation", "shortName", "preferredName"]
for source in sources:
row=[]
for tag in tags:
row.append(source[tag] if tag in source else '')
self.sources.append(tuple(row))
self.sources.sort()
def has_src(self,_src):
for abbr,name,ver in self.sources:
if _src == abbr:
return True
return False
#############################################################################
class Authentication:
def __init__(self, apikey, service, url, headers):
self.apikey=apikey
self.service=service
self.url=url
self.headers=headers
self.verbosity=0
def gettgt(self):
response = requests.post(self.url, data={'apikey':self.apikey}, headers=self.headers)
logging.debug(f'response = {response}')
d = PyQuery(response.text)
if response.status_code not in (200, 201):
logging.error(f'HTTP RESPONSE CODE: {response.status_code} ({d})')
return None
## Extract the entire URL needed (action attribute),
## to make a POST call to this URL in the getst method.
tgt = d.find('form').attr('action')
logging.debug(f'tgt = {tgt}')
return tgt
def getst(self, tgt):
r = requests.post(tgt, data={'service':self.service}, headers=self.headers)
st = r.text
return st
def setVerbosity(self, v):
self.verbosity=v
#############################################################################
def UmlsAuthGetTicket(auth, tgt, tries=10, sleep=1):
for i in range(1, tries+1):
try:
tkt = auth.getst(tgt)
return tkt
except Exception as e:
logging.error(f'{i}. {e}')
time.sleep(sleep)
continue
return None
#############################################################################
def UmlsApiGet(url, auth, tgt, params={}, tries=10, sleep=1):
for i in range(1,tries+1):
try:
params['ticket'] = UmlsAuthGetTicket(auth, tgt)
response = requests.get(url, params=params)
return response
except Exception as e:
logging.error(f'{i}. {e}')
time.sleep(sleep)
continue
return None
#############################################################################
def ReadParamFile(fparam):
params={};
with open(fparam, 'r') as fh:
for param in yaml.load_all(fh, Loader=yaml.BaseLoader):
for k,v in param.items():
params[k] = v
return params
#############################################################################
def XrefConcept(src, ids, skip, nmax, auth, ver=API_VERSION, base_url=API_BASE_URL, fout=None):
"""Find UMLS concept/CUI from external source cross-reference."""
n_in=0; n_concept=0; n_out=0;
result_tags=None;
tgt = auth.gettgt()
for id_query in ids:
n_in+=1
if nmax and n_in>nmax: break
if skip and n_in<=skip:
logging.debug(f'[{id_query}] skipping...')
continue
url = (f"{base_url}/content/{ver}/source/{src}/{id_query}" if src else f"/CUI/{id_query}")
logging.debug(f'{n_in}. url="{url}"')
response = UmlsApiGet(url, auth, tgt)
response.encoding = 'utf-8'
try:
items = json.loads(response.text)
except Exception as e:
logging.info(f'{n_in}. [{id_query}] {e}')
logging.debug(f'response.text="{response.text}"')
continue
logging.debug(json.dumps(items, indent=4))
result = items["result"]
for key in UMLS_COMMON_FIELDS+UMLS_OPTIONAL_FIELDS:
logging.info(f"""{key:14s}: {(result[key] if key in result else '')}""")
if 'semanticTypes' in result:
for i,styp in enumerate(result["semanticTypes"]):
for key in styp.keys():
logging.info(f'Semantic type {i+1}. {key}: {styp[key]}')
if n_out==0 or not result_tags:
            result_tags = list(result.keys())
id_tag = (f'{src}_id' if src else 'CUI')
fout.write('\t'.join([id_tag]+result_tags)+'\n')
vals = [id_query]
for tag in result_tags:
val = (result[tag] if tag in result else '')
if tag == 'concepts':
url2 = result['concepts']
response2 = UmlsApiGet(url2, auth, tgt)
response2.encoding = 'utf-8'
items2 = json.loads(response2.text)
logging.debug(json.dumps(items2, indent=4))
result2 = items2['result']
concepts = result2['results'] if 'results' in result2 else []
cuis=[]
for concept in concepts:
cui = concept['ui'] if 'ui' in concept else None
if cui: n_concept+=1
cuis.append(cui)
val=';'.join(cuis)
elif tag=='semanticTypes':
if type(val) is list:
sts=[]
for st in val:
if type(st) is dict and 'name' in st: sts.append(st['name'])
val = ';'.join(sts)
elif tag in ('relations', 'parents', 'children','descendants','ancestors',
'attributes', 'contentViewMemberships', 'atoms', 'defaultPreferredAtom',
'definitions'):
if val != 'NONE': val = ''
else:
if type(val) is str:
val = val.replace(base_url, '')
vals.append(str(val))
fout.write('\t'.join(vals)+'\n')
n_out+=1
logging.info(f'n_concept: {n_concept}')
logging.info(f'n_out: {n_out}')
#############################################################################
def GetCodes(cuis, srcs, auth, ver=API_VERSION, base_url=API_BASE_URL, fout=None):
n_out=0; df=None;
for cui in cuis:
codes = Cui2Code(cui, srcs, auth, ver, base_url)
for src in sorted(codes.keys()):
atoms = sorted(list(codes[src]))
for atom in atoms:
df_this = pd.DataFrame({'CUI':[cui], 'src':[src], 'atom_code':atom.code, 'atom_name':atom.name})
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out+=1
logging.info(f'n_cui: {len(cuis)}')
logging.info(f'n_out: {n_out}')
if fout is None: return df
#############################################################################
def Cui2Code(cui, srcs, auth, ver=API_VERSION, base_url=API_BASE_URL):
n_atom=0; pNum=1; params={}; codes={};
if srcs: params['sabs']=srcs
url = (f'{base_url}/content/{ver}/CUI/{cui}/atoms')
tgt = auth.gettgt()
while True:
params['pageNumber'] = pNum
response = UmlsApiGet(url, auth, tgt, params=params)
response.encoding = 'utf-8'
try:
items = json.loads(response.text)
except Exception as e:
logging.error(str(e))
break
logging.debug (json.dumps(items, indent=4))
atoms = items["result"] if "result" in items else []
for atom in atoms:
n_atom+=1
src = atom['rootSource'] if 'rootSource' in atom else None
code = atom['code'] if 'code' in atom else None
if code: code = re.sub(r'^.*/', '', code)
name = atom['name'] if 'name' in atom else None
cui = atom['concept'] if 'concept' in atom else None
if cui: cui = re.sub(r'^.*/', '', cui)
a = Atom(cui, src, code, name)
if not src in codes: codes[src] = set()
codes[src].add(a)
pageSize = items["pageSize"] if "pageSize" in items else None
pageNumber = items["pageNumber"] if "pageNumber" in items else None
pageCount = items["pageCount"] if "pageCount" in items else None
if pageNumber!=pNum:
logging.error(f'pageNumber!=pNum ({pageNumber}!={pNum})')
break
elif pNum==pageCount:
logging.debug(f'(done) pageNumber==pageCount ({pageNumber})')
break
else:
pNum+=1
return codes
#############################################################################
def GetAtoms(cuis, skip, nmax, srcs, auth, ver=API_VERSION, base_url=API_BASE_URL, fout=None):
'''Expected fields: sourceDescriptor,suppressible,code,name,language,descendants,classType,sourceConcept,obsolete,relations,parents,children,concept,ui,rootSource,definitions,attributes,ancestors,termType'''
n_in=0; n_atom=0; n_out=0; df=None; tags=None;
tgt = auth.gettgt()
for cui in cuis:
n_in+=1
if skip and n_in<=skip:
logging.debug(f'[{cui}] skipping...')
continue
pNum=1;
url = (f'{base_url}/content/{ver}/CUI/{cui}/atoms')
params={}
if srcs: params['sabs']=srcs
while True:
params['pageNumber']=pNum
response = UmlsApiGet(url, auth, tgt, params=params)
if not response:
break
if response.status_code != 200:
logging.error(f'response.status_code = "{response.status_code}"')
break
response.encoding = 'utf-8'
logging.debug(f'params = {str(params)}')
logging.debug(response.text)
items = json.loads(response.text)
logging.debug(json.dumps(items, indent=4))
result = items["result"]
for atom in result:
n_atom+=1
if not tags: tags = list(atom.keys())
for tag in ('relations', 'parents', 'children','descendants','ancestors', 'attributes', 'contentViewMemberships'):
if tag in atom and atom[tag]!='NONE': atom[tag] = '*'
df_this = pd.DataFrame({tag:[atom[tag] if tag in atom else ''] for tag in tags})
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out+=1
pageSize = items["pageSize"]
pageNumber = items["pageNumber"]
pageCount = items["pageCount"]
if pageNumber!=pNum:
logging.error(f'pageNumber!=pNum ({pageNumber}!={pNum})')
break
elif pNum==pageCount:
logging.debug(f'(done) pageNumber==pageCount ({pageNumber})')
break
else:
pNum+=1
if nmax and n_in==nmax: break
logging.info(f'n_atom: {n_atom}')
logging.info(f'n_out: {n_out}')
if fout is None: return df
#############################################################################
def GetRelations(cuis, skip, nmax, srcs, auth, ver=API_VERSION, base_url=API_BASE_URL, fout=None):
n_in=0; n_rel=0; n_out=0; tags=None; df=None;
tgt = auth.gettgt()
for cui in cuis:
n_in+=1
if skip and n_in<=skip:
logging.debug(f'[{cui}] skipping...')
continue
pNum=1;
url = (f'{base_url}/content/{ver}/CUI/{cui}/relations')
params={}
if srcs: params['sabs']=srcs
while True:
params['pageNumber']=pNum
response = UmlsApiGet(url, auth, tgt, params=params)
if not response:
break
if response.status_code != 200:
logging.error(f'response.status_code = "{response.status_code}"')
break
response.encoding = 'utf-8'
logging.debug(f'params = {str(params)}')
logging.debug(f'{response.text}')
items = json.loads(response.text)
logging.debug(json.dumps(items, indent=4))
result = items["result"]
for rel in result:
n_rel+=1
if not tags: tags = list(rel.keys())
df_this = pd.DataFrame({tag:[rel[tag] if tag in rel else ''] for tag in tags})
df_this["cui"] = [cui]
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out+=1
pageSize = items["pageSize"]
pageNumber = items["pageNumber"]
pageCount = items["pageCount"]
if pageNumber!=pNum:
logging.error(f': pageNumber!=pNum ({pageNumber}!={pNum})')
break
elif pNum==pageCount:
break
else:
pNum+=1
if nmax and n_in==nmax: break
logging.info(f'n_rel: {n_rel}')
logging.info(f'n_out: {n_out}')
if fout is None: return df
#############################################################################
def Search(query, searchType, inputType, returnIdType, srcs, auth, ver=API_VERSION, base_url=API_BASE_URL, fout=None):
"""Retrieves CUIs for a search term.
Expected fields: ui, rootSource, name, uri.
See https://documentation.uts.nlm.nih.gov/rest/search/
"""
src_counts={}; n_item=0; n_out=0; pNum=1; tags=None; df=None;
url = (f'{base_url}/search/{ver}')
params = {'string':query, 'searchType':searchType, 'inputType':inputType, 'returnIdType':returnIdType}
if srcs: params['sabs'] = srcs
tgt = auth.gettgt()
while True:
params['pageNumber']=pNum
logging.debug(f'params = {str(params)}')
response = UmlsApiGet(url, auth, tgt, params=params)
response.encoding = 'utf-8'
items = json.loads(response.text)
logging.debug (json.dumps(items, indent=4))
result = items['result']
classType = result['classType']
pageSize = items["pageSize"]
pageNumber = items["pageNumber"]
##No pageCount in search response.
items = result['results']
if not items:
break
elif len(items)==1 and items[0]['name']=='NO RESULTS':
break
elif pageNumber!=pNum:
logging.debug(f'pageNumber!=pNum ({pageNumber}!={pNum})')
break
for item in items:
n_item+=1
if not tags: tags = list(item.keys())
vals = []
cui = item['ui'] if 'ui' in item else None
if 'rootSource' in item:
if not item['rootSource'] in src_counts:
src_counts[item['rootSource']]=0
src_counts[item['rootSource']]+=1
df_this = pd.DataFrame({tag:[item[tag] if tag in item else ''] for tag in tags})
if fout is None: df = | pd.concat([df, df_this]) | pandas.concat |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
Element indexing to a cat column must give the underlying object
not the numerical index.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
def test_categorical_masking():
"""
    Test the common operation of getting all rows that match a certain
    category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = | pd.Series(cat) | pandas.Series |
'''
This method uses these features
['dow', 'year', 'month', 'day_of_week', 'holiday_flg', 'min_visitors', 'mean_visitors', 'median_visitors', 'max_visitors', 'count_observations', 'air_genre_name', 'air_area_name', 'latitude', 'longitude', 'rs1_x', 'rv1_x', 'rs2_x', 'rv2_x', 'rs1_y', 'rv1_y', 'rs2_y', 'rv2_y', 'total_reserv_sum', 'total_reserv_mean', 'total_reserv_dt_diff_mean']
RMSE GradientBoostingRegressor: 0.501477019571
RMSE KNeighborsRegressor: 0.421517079307
'''
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
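# Note: RMSLE() above simply returns the RMSE of whatever it is given; it only equals a true
# RMSLE if the targets and predictions are already log1p-transformed visitor counts, which this
# script is assumed to do before scoring, e.g. RMSLE(np.log1p(y_true), np.log1p(y_pred)).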
data = {
'tra': pd.read_csv('./data/air_visit_data.csv'),
'as': pd.read_csv('./data/air_store_info.csv'),
'hs': pd.read_csv('./data/hpg_store_info.csv'),
'ar': pd.read_csv('./data/air_reserve.csv'),
'hr': pd.read_csv('./data/hpg_reserve.csv'),
'id': pd.read_csv('./data/store_id_relation.csv'),
'tes': pd.read_csv('./data/sample_submission.csv'),
'hol': pd.read_csv('./data/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
# attach the matching air_store_id to each HPG reservation via the store id relation (inner join keeps only mapped stores)
data['hr'] = | pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id']) | pandas.merge |
import os
import sys
import scipy
import glob
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from scipy.stats import zscore
from glmpca.glmpca import glmpca
def load_data(data_dir):
matrix_dir = data_dir
mat = scipy.io.mmread(glob.glob(data_dir+'/*.mtx*')[0]).tocsr().T
genes_path = glob.glob(data_dir+'/*genes*')[0]
gene_names = pd.read_csv(genes_path, index_col=0, header=None, sep='\t').iloc[:, 0].tolist()
barcodes_path = glob.glob(data_dir+'/*barcodes*')[0]
valid_bc = pd.read_csv(barcodes_path, header=None, sep='\t').iloc[:, 0].tolist()
    # build a cells x genes DataFrame (rows = barcodes, columns = gene names)
data_df = pd.DataFrame(mat.todense(), index=valid_bc, columns=gene_names)
data_df = data_df.sort_index()
print(data_df.shape)
return data_df
def clean_data(data_df):
binarized = (data_df > 0)
    # keep genes detected in more than 15 cells
gene_cell_counts = binarized.sum(axis=0)
use_genes = gene_cell_counts.index[gene_cell_counts > 15]
data_df = data_df.loc[:, use_genes]
    # keep cells with more than 15 detected genes
cell_gene_counts = binarized.sum(axis=1)
use_cells = cell_gene_counts.index[cell_gene_counts > 15]
data_df = data_df.loc[use_cells, :]
# remove zero-columns
data_df = data_df.loc[:, (data_df > 0).any(axis=0)]
return data_df
def load_indexes(data_dir):
    genes_path = glob.glob(data_dir+'/*genes*')[0]
    gene_names = pd.read_csv(genes_path, index_col=0, header=None, sep='\t').iloc[:, 0].tolist()
    barcodes_path = glob.glob(data_dir+'/*barcodes*')[0]
valid_bc = pd.read_csv(barcodes_path, header=None, sep='\t').iloc[:, 0].tolist()
return valid_bc, gene_names
def run_median(counts):
ls = counts.sum(axis = 1)
norm_counts = counts.div(ls, axis=0).mul(np.median(ls), axis=0)
    norm_counts = pd.DataFrame(norm_counts, index=counts.index, columns=counts.columns)
return norm_counts
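# Worked example for run_median (illustrative numbers): if a cell's library size is 2000 and the
# median library size across cells is 1000, every count in that cell is scaled by 1000/2000 = 0.5,
# so a raw count of 8 becomes 4 after median normalization.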
def run_median_log(counts, pc=0.1):
ls = counts.sum(axis = 1)
norm_counts = counts.div(ls, axis=0).mul(np.median(ls), axis=0)
log_norm_counts = np.log2(norm_counts + pc) - np.log2(pc)
log_norm_counts = | pd.DataFrame(log_norm_counts, index=counts.index, columns=counts.index) | pandas.DataFrame |
import os
import pandas as pd
from cowidev.vax.utils.gsheets import VaccinationGSheet
from cowidev.vax.process import process_location
from cowidev.vax.cmd.utils import get_logger, print_eoe
from pandas.core.base import DataError
from pandas.errors import ParserError
from cowidev.utils import paths
logger = get_logger()
def read_csv(filepath):
    try:
        return pd.read_csv(filepath)
    except Exception as exc:
        raise ParserError(f"Error tokenizing data from file {filepath}") from exc
def main_process_data(
gsheets_api,
google_spreadsheet_vax_id: str,
skip_complete: list = None,
skip_monotonic: dict = {},
skip_anomaly: dict = {},
):
print("-- Processing data... --")
# Get data from sheets
logger.info("Getting data from Google Spreadsheet...")
gsheet = VaccinationGSheet(gsheets_api, google_spreadsheet_vax_id)
df_manual_list = gsheet.df_list()
# Get automated-country data
logger.info("Getting data from output...")
automated = gsheet.automated_countries
filepaths_auto = [paths.out_vax(country) for country in automated]
df_auto_list = [read_csv(filepath) for filepath in filepaths_auto]
# Concatenate
vax = df_manual_list + df_auto_list
# Check that no location is present in both manual and automated data
manual_locations = set([df.location[0] for df in df_manual_list])
auto_locations = os.listdir(paths.SCRIPTS.OUTPUT_VAX_MAIN)
auto_locations = set([loc.replace(".csv", "") for loc in auto_locations])
common_locations = auto_locations.intersection(manual_locations)
if len(common_locations) > 0:
raise DataError(f"The following locations have data in both output/main_data and GSheet: {common_locations}")
# vax = [v for v in vax if v.location.iloc[0] == "Pakistan"] # DEBUG
# Process locations
def _process_location(df):
monotonic_check_skip = skip_monotonic.get(df.loc[0, "location"], [])
anomaly_check_skip = skip_anomaly.get(df.loc[0, "location"], [])
return process_location(df, monotonic_check_skip, anomaly_check_skip)
logger.info("Processing and exporting data...")
vax_valid = []
for df in vax:
if "location" not in df:
raise ValueError(f"Column `location` missing. df: {df.tail(5)}")
country = df.loc[0, "location"]
if country.lower() not in skip_complete:
df = _process_location(df)
vax_valid.append(df)
# Export
df.to_csv(paths.out_vax(country, public=True), index=False)
logger.info(f"{country}: SUCCESS ✅")
else:
logger.info(f"{country}: SKIPPED 🚧")
df = | pd.concat(vax_valid) | pandas.concat |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
df = DataFrame(d)
d["a"] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
| tm.assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
import requests
import json
import pandas as pd
from datetime import datetime
import os
path = ".\\REPORTS"
isExist = os.path.exists(path)
if not isExist:
os.makedirs(path)
now = datetime.now()
excel_name = "Report-" + str(now.date()) + "_" + str(now.strftime("%H-%M-%S")) + ".xlsx"
excel_path = ".\\REPORTS\\" + excel_name
writer = pd.ExcelWriter(excel_path, engine='xlsxwriter')
writer.save()
df = pd.read_excel(excel_path)
df = | pd.DataFrame([],index=[], columns=['Switch S/N', 'Errors', 'Port with Errors', 'Warnings', 'Port with Warnings', 'Half Duplex', 'Port with Half Duplex']) | pandas.DataFrame |
import argparse
import pandas as pd
import numpy as np
import sys
from pathlib import Path
p = str(Path(__file__).resolve().parents[2]) # directory two levels up from this file
sys.path.append(p)
from realism.realism_utils import make_orderbook_for_analysis
def create_orderbooks(exchange_path, ob_path):
MID_PRICE_CUTOFF = 10000
processed_orderbook = make_orderbook_for_analysis(exchange_path, ob_path, num_levels=1,
hide_liquidity_collapse=False)
cleaned_orderbook = processed_orderbook[(processed_orderbook['MID_PRICE'] > - MID_PRICE_CUTOFF) &
(processed_orderbook['MID_PRICE'] < MID_PRICE_CUTOFF)]
transacted_orders = cleaned_orderbook.loc[cleaned_orderbook.TYPE == "ORDER_EXECUTED"]
transacted_orders = transacted_orders.reset_index()
transacted_orders = transacted_orders.sort_values(by=['index', 'ORDER_ID']).iloc[1::2]
transacted_orders.set_index('index', inplace=True)
return processed_orderbook, transacted_orders, cleaned_orderbook
def calculate_market_impact(orders_df, ob_df, start_time, end_time, tao):
def create_bins(tao, start_time, end_time, orders_df, is_buy):
bins = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=tao))
binned = pd.cut(orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].index, bins=bins)
binned_volume = orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].groupby(binned).SIZE.agg(np.sum)
return binned_volume
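    # create_bins returns, for one side of the book (buys or sells), the total executed SIZE per
    # tao-second interval between start_time and end_time; the callers below additionally
    # fillna(0) any empty bins.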
def calculate_mid_move(row):
try:
t_start = row.name.left
t_end = row.name.right
mid_t_start = mid_resampled.loc[mid_resampled.index == t_start].item()
mid_t_end = mid_resampled.loc[mid_resampled.index == t_end].item()
if row.ti < 0:
row.mi = -1 * ((mid_t_end - mid_t_start) / mid_t_start) * 10000 # bps
else:
row.mi = (mid_t_end - mid_t_start) / mid_t_start * 10000 # bps
return row.mi
except:
pass
ob_df = ob_df.reset_index().drop_duplicates(subset='index', keep='last').set_index('index')
mid = ob_df.MID_PRICE
mid_resampled = mid.resample(f'{tao}s').ffill()
binned_buy_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=True).fillna(0)
binned_sell_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=False).fillna(0)
midf = pd.DataFrame()
midf['buy_vol'] = binned_buy_volume
midf['sell_vol'] = binned_sell_volume
midf['ti'] = midf['buy_vol'] - midf['sell_vol'] # Trade Imbalance
midf['pov'] = abs(midf['ti']) / (midf['buy_vol'] + midf['sell_vol']) # Participation of Volume in tao
midf['mi'] = None
midf.index = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=int(tao)))
midf.mi = midf.apply(calculate_mid_move, axis=1)
pov_bins = np.linspace(start=0, stop=1, num=1000, endpoint=False)
pov_binned = pd.cut(x=midf['pov'], bins=pov_bins)
midf['pov_bins'] = pov_binned
midf_gpd = midf.sort_values(by='pov_bins')
midf_gpd.index = midf_gpd.pov_bins
del midf_gpd['pov_bins']
df = pd.DataFrame(index=midf_gpd.index)
df['mi'] = midf_gpd['mi']
df['pov'] = midf_gpd['pov']
df = df.groupby(df.index).mean()
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Market Impact Curve as described in AlmgrenChriss 05 paper')
parser.add_argument('--stock', default=None, required=True, help='stock (ABM)')
parser.add_argument('--date', default=None, required=True, help='date (20200101)')
parser.add_argument('--log', type=str, default=None, required=True, help='log folder')
parser.add_argument('--tao', type=int, required=True, help='Number of seconds in each bin')
args, remaining_args = parser.parse_known_args()
stock = args.stock
date = args.date
start_time = pd.Timestamp(date) + | pd.to_timedelta('09:30:00') | pandas.to_timedelta |
import pandas as pd
import numpy as np
def plot_line(direction, start, ax, plot_kwargs):
# plots a line on axis 'ax'.
# The line is defined by the direction 'direction' and start point 'start'.
    # The direction and start should be 2x1 vectors, but the only requirement is
# that they are of size == 2 (They are reshaped to 2x1).
# The function calculates the current size of the axis and plots the line
# within those limits.
# assertions of input vectors
assert direction.size == 2
assert start.size == 2
# reshape input vectors
direction = direction.reshape(2,1)
start = start.reshape(2,1)
# fetch limits from axis
x_min, x_max = ax.get_xlim()
y_min, y_max = ax.get_ylim()
# Calculate scalars (lambda) such that the direction reaches a limit.
# e.g. x_min = lambda[0]*direction + start
# Concatenate for simpler calculation
lim_vec = np.array([x_min, y_min, x_max, y_max]).reshape(4,1)
start_vec = np.vstack((start,start)).reshape(4,1)
dir_vec = np.vstack((direction, direction)).reshape(4,1)
# calculate lambdas
lambdas = (lim_vec-start_vec)/dir_vec
lambdas = lambdas.reshape(4)
# Edge cases when the direction is parallel to the x or y axis (leads to division by zero)
    if direction[0] == 0:
        lambdas[0] = -np.inf
        lambdas[2] = np.inf
    if direction[1] == 0:
        lambdas[1] = -np.inf
        lambdas[3] = np.inf
    # two of the lambdas will be positive and two will be negative.
# we want to pick the lesser extreme of both cases.
sorted = np.sort(lambdas)
min_lambda = sorted[1]
max_lambda = sorted[2]
# calculate edge points of the line
p_min = min_lambda*direction + start
p_max = max_lambda*direction + start
# plot the line
ax.plot([p_min[0], p_max[0]],
[p_min[1], p_max[1]],**plot_kwargs)
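# Minimal usage sketch for plot_line (the axis limits and styling below are illustrative):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.set_xlim(-2, 2)
#   ax.set_ylim(-2, 2)
#   # line through the origin with direction (1, 0.5), clipped to the current axis limits
#   plot_line(np.array([1.0, 0.5]), np.array([0.0, 0.0]), ax, {"color": "k", "linestyle": "--"})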
def read_toy_data():
# Read toy data (fore- and background) from data folder and
    # package them into a dataframe.
# The dataframe has columns
# - x
# - y
# - Class (Case, Control)
###########################
foreground_path = "../data/toy/foreground.csv"
background_path = "../data/toy/background.csv"
data = pd.read_csv(foreground_path, header= None).values
X = pd.DataFrame(data = {'x' : data[0],
'y':data[1]})
data = pd.read_csv(background_path, header= None).values
Y = pd.DataFrame(data = {'x' : data[0],
'y':data[1]})
X['class'] = 'Case'
Y['class'] = 'Control'
return | pd.concat([X,Y], ignore_index=True) | pandas.concat |
import os
import sys
import glob
import argparse
from multiprocessing import Pool
from itertools import product
import pandas as pd
import numpy as np
from itea.regression import ITEA_regressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from filelock import FileLock
import warnings; warnings.filterwarnings('ignore')
filelock_name = './regression_benckmark.lock'
datasets_folder = './data sets'
results_fname = './regression_benchmark_res.csv'
itea_configuration = {
'gens' : 250,
'popsize' : 400,
'max_terms' : 15,
'expolim' : (-3, 3),
'verbose' : False,
'random_state' : None,
'simplify_method' : None,
'tfuncs' : {
'log' : np.log,
'sqrt.abs' : lambda x: np.sqrt(np.abs(x)),
'id' : lambda x: x,
'sin' : np.sin,
'cos' : np.cos,
'exp' : np.exp
},
'tfuncs_dx' : {
'log' : lambda x: 1/x,
'sqrt.abs' : lambda x: x/( 2*(np.abs(x)**(3/2)) ),
'id' : lambda x: np.ones_like(x),
'sin' : np.cos,
'cos' : lambda x: -np.sin(x),
'exp' : np.exp,
}
}
def experiment_worker(ds_name, rep):
with FileLock(filelock_name):
try:
ds_data = pd.read_csv(f'{datasets_folder}/{ds_name}.csv', delimiter=',')
except Exception as e:
print(f'Could not load {ds_name} data set. Got exception {e}')
sys.exit()
columns = [
'Dataset', 'Rep', 'RMSE_train', 'RMSE_test', 'Exectime', 'Expr']
results = {c:[] for c in columns}
resultsDF = pd.DataFrame(columns=columns)
if os.path.isfile(results_fname):
resultsDF = pd.read_csv(results_fname)
results = resultsDF.to_dict('list')
# Checking if this ds_name-repetition was already executed
if len(resultsDF[
(resultsDF['Dataset']==ds_name) & (resultsDF['Rep']==rep)])>0:
print(f'already executed experiment {ds_name}-{rep}')
return
print(f'Executing experiment {ds_name}-{rep}...')
# Random train and test split
X_train, X_test, y_train, y_test = train_test_split(
ds_data.iloc[:, :-1].astype('float64'),
ds_data.iloc[:, -1].astype('float64'),
test_size=0.33, random_state=None
)
reg = ITEA_regressor(
labell=ds_data.columns[:-1], **itea_configuration)
reg.fit(X_train, y_train)
itexpr = reg.bestsol_
# Locking to avoid parallel writing if multiple datasets are being
# executed
with FileLock(filelock_name):
# Retrieving the latest results
if os.path.isfile(results_fname):
resultsDF = | pd.read_csv(results_fname) | pandas.read_csv |
"""
train the conditional VAE network
"""
# import some packages
from tensorflow.keras.layers import Lambda, Input, Dense, Concatenate
from cbrain.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import LearningRateScheduler,Callback
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import tensorflow as tf
from cbrain.imports import *
from cbrain.utils import *
import pandas as ps
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
"""Reparameterization trick by sampling from an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
    Based on the VAE example from the Keras (v1) documentation; a more recent
    Keras VAE implementation can be found at
    https://keras.io/examples/generative/vae/
"""
z_mean, z_log_var = args
batch= K.shape(z_mean)[0]
dim=K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
    epsilon=K.random_normal(shape=(batch,dim)) # epsilon: standard-normal noise tensor
sample_prob=z_mean+K.exp(0.5*z_log_var)*epsilon #exp= elementwise exponential
return sample_prob
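# For example, with the settings used below (batch_size = 666, latent_dim = 5), epsilon is drawn
# with shape (666, 5) and each latent sample is z = z_mean + exp(0.5 * z_log_var) * epsilon,
# computed elementwise.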
# KL annealing: keep the KL weight at 0 early in training so the cVAE first learns to reconstruct, then ramp the weight up
klstart = 2
# number of epochs over which KL scaling is increased from 0 to 1
kl_annealtime = 5
class AnnealingCallback(Callback):
def __init__(self, weight):
self.weight = weight
def on_epoch_end (self, epoch, logs={}):
if epoch > klstart :
new_weight = min(K.get_value(self.weight) + (1./kl_annealtime), 1.)
K.set_value(self.weight, new_weight)
print ("Current KL Weight is " + str(K.get_value(self.weight)))
# the initial value of weight is 0
# define it as a keras backend variable
weight = K.variable(0.)
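# Resulting schedule (derived from klstart = 2 and kl_annealtime = 5): the KL weight stays at 0
# for epochs 0-2, then grows by 1/5 at the end of every epoch with epoch > klstart,
# i.e. 0.2 after epoch 3, 0.4 after epoch 4, ..., and is capped at 1.0 from epoch 7 onwards.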
original_dim_input=int(65+64) # cVAE is trained on SP and CAM variables
original_dim_output=65 # and reproduces SP variables
# network parameters
intermediate_dim = 457 # number of nodes in first / last hidden layers of Encoder / Decoder
batch_size = 666
latent_dim = 5 # latent space width of signals passed through the encoder
epochs = 40
# network parameters
input_shape = (original_dim_input,)
out_shape=(original_dim_output,)
large_scale_fields=64
latent_dim_cond = int(latent_dim+large_scale_fields) # decoder input width: latent nodes plus the 64 large-scale CAM variables
## Encoder
inputs =Input(shape=input_shape, name='encoder_input')
x_0 =Dense(intermediate_dim, activation='relu')(inputs)
x_1 =Dense(intermediate_dim, activation='relu')(x_0)
x_2 =Dense(int(np.round(intermediate_dim/2)), activation='relu')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='relu')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/8)), activation='relu')(x_3)
x_5 =Dense(int(np.round(intermediate_dim/16)), activation='relu')(x_4)
z_mean = Dense(latent_dim, name='z_mean')(x_5)
z_log_var = Dense(latent_dim, name='z_log_var')(x_5)
# reparametrization trick
z = Lambda(sampling, output_shape=(latent_dim), name='z')([z_mean, z_log_var])
z_cond=Concatenate()([z,inputs[:,65:129]]) #here latent nodes and CAM variables are merged
# instantiate encoder model
encoder = Model([inputs], [z_mean, z_log_var,z, z_cond], name='encoder')
#output of encoder is mean, log-var, latent nodes z and (z+CAM) variables
encoder.summary()
##conditional Decoder
decoder_inputs =Input(shape=(latent_dim_cond,), name='decoder_input') # has 69 input nodes due to combination of z and CAM variables
x_1 =Dense(int(np.round(intermediate_dim/16)), activation='relu')(decoder_inputs)
x_2 =Dense(int(np.round(intermediate_dim/8)), activation='relu')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='relu')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/2)), activation='relu')(x_3)
x_5 =Dense(intermediate_dim, activation='relu')(x_4)
x_6 =Dense(intermediate_dim, activation='relu')(x_5)
outputs = Dense(original_dim_output, activation='elu')(x_6)
decoder = Model([decoder_inputs], outputs, name='decoder')
decoder.summary()
emul_cond_outputs=decoder(encoder([inputs])[3])
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
VAE_loss = K.mean(kl_loss*weight)
cond_VAE=Model(inputs,emul_cond_outputs)
cond_VAE.add_loss(VAE_loss)
cond_VAE.add_metric(kl_loss, name='kl_loss', aggregation='mean')
#loading scaling dictionary for SP variables
scale_array_2D=ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range_profiles.csv')
scale_array_1D= | ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range.csv') | pandas.read_csv |
import pandas as pd
import warnings
def collate_columns(data, column, reset_index=True):
"""Collate specified column from different DataFrames
Parameters
----------
* data : dict, of pd.DataFrames organized as {ZoneID: {ScenarioID: result_dataframe}}
* column : str, name of column to collate into single dataframe
Returns
----------
* dict, of pd.DataFrame of column data collated for each zone
"""
collated = {}
for zone, res in list(data.items()):
# need pd.Series to allow columns of different lengths
zone_d = {scen: pd.Series(res[scen].loc[:, column].values) for scen in res}
collated[zone] = pd.DataFrame(zone_d)
if reset_index:
collated[zone].reset_index(drop=True, inplace=True)
# End for
return collated
# End collate_columns()
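# Sketch of the expected input/output for collate_columns (zone/scenario names are illustrative):
#
#   data = {"zone_A": {"scen_1": df_a1, "scen_2": df_a2}}
#   flows = collate_columns(data, "flow")
#   # flows["zone_A"] is a DataFrame with one column per scenario ("scen_1", "scen_2"),
#   # each holding that scenario's "flow" column as a pd.Series.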
def generate_ts_indicators(data, warmup_years=3, years_offset=3):
"""
Generate the normalized indicators for a time series.
Parameters
----------
* data : pd.DataFrame, dataframe to extract data from
* warmup_years : int, number of years that represent warmup period
* years_offset : int, number of years to offset by
Returns
----------
* tuple[List], time index and values
"""
index = []
values = []
offset_dt = pd.DateOffset(years=years_offset - 1)
# Initial datetimes
past_date = data.index[0] + | pd.DateOffset(years=warmup_years) | pandas.DateOffset |
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
}
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
pd.to_datetime("2021"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
},
{
"start_time": pd.to_datetime("2021"),
"end_time": pd.to_datetime("2021"),
},
],
),
],
)
def test_finds_correct_edges(self, input_equilibration_status, expected_boundaries):
parsed_equilibration_boundaries = module.get_equilibration_boundaries(
equilibration_status=pd.Series(input_equilibration_status)
)
expected_equilibration_boundaries = pd.DataFrame(
expected_boundaries,
columns=["start_time", "end_time"],
dtype="datetime64[ns]",
).reset_index(
drop=True
) # Coerce to a RangeIndex when creating empty DataFrame
pd.testing.assert_frame_equal(
parsed_equilibration_boundaries, expected_equilibration_boundaries
)
class TestPivotProcessExperimentResults:
def test_combines_image_rows_by_ROI(self):
test_process_experiment_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_process_experiment_result.csv"
)
test_process_experiment_data = pd.read_csv(
test_process_experiment_file_path, parse_dates=["timestamp"]
)
pivot_results = module.pivot_process_experiment_results_on_ROI(
experiment_df=test_process_experiment_data,
ROI_names=list(test_process_experiment_data["ROI"].unique()),
pivot_column_names=["r_msorm", "g_msorm"],
)
expected_results_data = (
pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"ROI 0 r_msorm": 0.5,
"ROI 1 r_msorm": 0.4,
"ROI 0 g_msorm": 0.4,
"ROI 1 g_msorm": 0.5,
"image": "image-0.jpeg",
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:02"),
"ROI 0 r_msorm": 0.3,
"ROI 1 r_msorm": 0.6,
"ROI 0 g_msorm": 0.6,
"ROI 1 g_msorm": 0.3,
"image": "image-1.jpeg",
},
]
)
.set_index("timestamp")
.astype(pivot_results.dtypes)
)
pd.testing.assert_frame_equal(pivot_results, expected_results_data)
class TestOpenAndCombineProcessExperimentResults:
def test_keeps_distinct_rows_for_images_with_same_timestamp(self):
test_process_experiment_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_process_experiment_result.csv"
)
# Open the same file twice to ensure there are duplicate timestamps
pivot_results = module.open_and_combine_process_experiment_results(
process_experiment_result_filepaths=[
test_process_experiment_file_path,
test_process_experiment_file_path,
]
)
unique_timestamps = pivot_results.index.unique()
assert len(unique_timestamps) == len(pivot_results) / 2
class TestFilterEquilibratedImages:
def test_returns_only_equilibrated_images(self):
test_roi_data = pd.DataFrame(
[
{"timestamp": pd.to_datetime("2019-01-01"), "image": "image-0.jpeg"},
{"timestamp": pd.to_datetime("2019-01-03"), "image": "image-1.jpeg"},
]
).set_index("timestamp")
test_equilibration_boundaries = pd.Series(
{
"start_time": pd.to_datetime("2019-01-02"),
"end_time": pd.to_datetime("2019-01-04"),
}
)
equilibrated_image_data = module.filter_equilibrated_images(
equilibration_range=test_equilibration_boundaries, df=test_roi_data
)
expected_equilibrated_image_data = test_roi_data[1:]
pd.testing.assert_frame_equal(
equilibrated_image_data, expected_equilibrated_image_data
)
class TestGetImagesByExperiment:
def test_combines_experiment_metadata_correctly(self, mocker):
mock_image_data = pd.DataFrame(
{
"experiment_name": [
sentinel.experiment_1,
sentinel.experiment_1,
sentinel.experiment_2,
],
"image_filename": [
sentinel.image_1,
sentinel.image_2,
sentinel.image_3,
],
}
)
mocker.patch.object(
module, "get_all_experiment_image_filenames", return_value=mock_image_data
)
mocker.patch.object(
module,
"datetime_from_filename",
side_effect=[
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:02"),
| pd.to_datetime("2019-01-01 00:00:03") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""id3_algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1yJOjE_DE32TWGNSS0KlA9j9m1SpE_txk
"""
"""
Make the imports of python packages needed
"""
import pandas as pd
import numpy as np
from pprint import pprint
import math
from sklearn.feature_selection import chi2
'''
DATASET LOADING AND PREPROCESSING
'''
dataset = pd.read_table('Training_Data.txt', delim_whitespace=True, names=('A', 'Target'))#Importing datasets and giving names
val_data = | pd.read_table('Validation_Data.txt', delim_whitespace=True, names=('A', 'Target')) | pandas.read_table |
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import shap
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
class Boba_Model_Diagnostics():
def __init__(self):
pass
def run_model_diagnostics(self, model, X_train, X_test, y_train, y_test, target):
self.get_model_stats(model, X_train, X_test, y_train, y_test, target)
self.plot_shap_imp(model,X_train)
self.plot_shap_bar(model,X_train)
self.residual_plot(model,X_test,y_test,target)
self.residual_density_plot(model,X_test,y_test,target)
self.identify_outliers(model, X_test, y_test,target)
self.residual_mean_plot(model,X_test,y_test,target)
self.residual_variance_plot(model,X_test,y_test,target)
self.PVA_plot(model,X_test,y_test,target)
self.inverse_PVA_plot(model,X_train,y_train,target)
self.estimates_by_var(model,X_train,y_train,target,'Age')
self.error_by_var(model,X_train,y_train,target,'Age')
self.volatility_by_var(model,X_train,y_train,target,'Age')
def get_model_stats(self, model, X_train, X_test, y_train, y_test, target):
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
        test_RMSE = np.sqrt(mean_squared_error(y_test, test_pred))
        test_R2 = model.score(X_test, y_test)
        test_MAE = mean_absolute_error(y_test, test_pred)
        train_RMSE = np.sqrt(mean_squared_error(y_train, train_pred))
        train_R2 = model.score(X_train, y_train)
        train_MAE = mean_absolute_error(y_train, train_pred)
df = pd.DataFrame(data = {'RMSE': np.round(train_RMSE,4),
'R^2': np.round(train_R2,4),
'MAE': np.round(train_MAE,4)}, index = ['train'])
df2 = pd.DataFrame(data = {'RMSE': np.round(test_RMSE,4),
'R^2': np.round(test_R2,4),
'MAE': np.round(test_MAE,4)}, index = ['test'])
print("Model Statistics for {}".format(target))
print('-'*40)
print(df)
print('-'*40)
print(df2)
print('-'*40)
def plot_shap_imp(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train)
plt.show()
def plot_shap_bar(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train, plot_type='bar')
plt.show()
def feature_imp(self,model,X_train,target):
sns.set_style('darkgrid')
names = X_train.columns
coef_df = pd.DataFrame({"Feature": names, "Importance": model.feature_importances_},
columns=["Feature", "Importance"])
coef_df = coef_df.sort_values('Importance',ascending=False)
coef_df
fig, ax = plt.subplots()
sns.barplot(x="Importance", y="Feature", data=coef_df.head(20),
label="Importance", color="b",orient='h')
plt.title("XGB Feature Importances for {}".format(target))
plt.show()
def residual_plot(self,model, X_test, y_test,target):
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
fig, ax = plt.subplots()
ax.scatter(pred, residuals)
ax.plot([pred.min(), pred.max()], [0, 0], 'k--', lw=4)
ax.set_xlabel('Predicted')
ax.set_ylabel('Residuals')
plt.title("Residual Plot for {}".format(target))
plt.show()
def residual_density_plot(self,model, X_test, y_test,target):
sns.set_style('darkgrid')
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
sns.distplot(residuals)
plt.title("Residual Density Plot for {}".format(target))
plt.show()
def residual_variance_plot(self, model, X_test, y_test,target):
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp[target], 10))['residuals'].std()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Variance plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Variance")
plt.show()
except:
pass
def residual_mean_plot(self, model, X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp['pred'], 10))['residuals'].mean()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Mean plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Mean")
plt.show()
except:
pass
def PVA_plot(self,model, X_test, y_test, target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['predicted'] = pred
y_temp['residuals'] = residuals
            pva = y_temp.groupby(pd.qcut(y_temp['predicted'], 10))[[target, 'predicted']].mean()
pva.index = [1,2,3,4,5,6,7,8,9,10]
pva = pva.reset_index()
pva = pva.rename(columns={target: "actual"})
df = pva.melt('index', var_name='cols', value_name='vals')
sns.factorplot(x="index", y="vals", hue='cols', data=df,legend_out=False)
plt.title("Predicted v Actual Chart by Deciles for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("{}".format(target))
plt.legend(loc='upper left')
plt.show()
except:
pass
def inverse_PVA_plot(self, model,X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['predicted'] = pred
y_temp['residuals'] = residuals
            pva = y_temp.groupby(pd.qcut(y_temp[target], 10))[[target, 'predicted']].mean()
pva.index = [1,2,3,4,5,6,7,8,9,10]
pva = pva.reset_index()
pva = pva.rename(columns={target: "actual"})
df = pva.melt('index', var_name='cols', value_name='vals')
sns.factorplot(x="index", y="vals", hue='cols', data=df,legend_out=False)
plt.title("Actual v Predicted Chart by Deciles for {}".format(target))
plt.xlabel("Actual Decile")
plt.ylabel("{}".format(target))
plt.legend(loc='upper left')
plt.show()
except:
pass
def identify_outliers(self, model, X_test, y_test,target):
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
index_list = list(X_test.index)
master_df = master_df.iloc[index_list,:]
pred_df = pd.DataFrame(data = {'pred':model.predict(X_test),
'residuals':pd.Series(model.predict(X_test),index=X_test.index) - pd.Series(y_test[target])},index=X_test.index)
master_df = pd.merge(master_df,pred_df,left_index=True,right_index=True)
master_df = master_df[['Season','Name',target,'Age','pred','residuals']]
print('Top 20 UnderEstimates')
print(master_df.sort_values('residuals',ascending=True).head(20))
print('-'*80)
print('Top 20 OverEstimates')
print(master_df.sort_values('residuals',ascending=True).tail(20))
def estimates_by_var(self, model, X_test, y_test,target,var):
sns.set_style('darkgrid')
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
index_list = list(X_test.index)
master_df = master_df.iloc[index_list,:]
pred_df = pd.DataFrame(data = {'pred':model.predict(X_test),
'residuals':pd.Series(model.predict(X_test),index=X_test.index) - pd.Series(y_test[target])},index=X_test.index)
master_df = pd.merge(master_df,pred_df,left_index=True,right_index=True)
        gb = master_df.groupby(master_df[var])[['pred', target]].mean()
gb = gb.reset_index()
gb = gb.rename(columns={target: "actual",'pred':'predicted'})
df = gb.melt(var, var_name='type', value_name='vals')
ax = sns.lineplot(x=var, y="vals", hue="type",data=df)
plt.title("Average Estimated {} by {}".format(target,var))
plt.xlabel("{}".format(var))
plt.ylabel("{}".format(target))
plt.xticks(np.arange(gb[var].min(), gb[var].max(), step=1),rotation=45)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[1:], labels=labels[1:])
plt.show()
def error_by_var(self, model, X_test, y_test,target,var):
sns.set_style('darkgrid')
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
index_list = list(X_test.index)
master_df = master_df.iloc[index_list,:]
pred_df = pd.DataFrame(data = {'pred':model.predict(X_test),
'residuals':pd.Series(model.predict(X_test),index=X_test.index) - pd.Series(y_test[target])},index=X_test.index)
master_df = pd.merge(master_df,pred_df,left_index=True,right_index=True)
gb = master_df.groupby(master_df[var])['residuals'].mean()
gb = gb.reset_index()
ax = sns.lineplot(x=var, y="residuals", data=gb)
plt.title("Average Error by {}".format(var))
plt.xlabel("{}".format(var))
plt.ylabel("Residual mean")
plt.show()
def volatility_by_var(self, model, X_test, y_test,target,var):
sns.set_style('darkgrid')
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
index_list = list(X_test.index)
master_df = master_df.iloc[index_list,:]
pred_df = pd.DataFrame(data = {'pred':model.predict(X_test),
'residuals':pd.Series(model.predict(X_test),index=X_test.index) - pd.Series(y_test[target])},index=X_test.index)
master_df = | pd.merge(master_df,pred_df,left_index=True,right_index=True) | pandas.merge |
import os
import re
import gzip
import shutil
import subprocess
import nibabel as nib
import ntpath
import pandas as pd
import numpy as np
import tempfile
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, CommandLine, CommandLineInputSpec,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
def nib_load_3d(fn):
img = nib.load(fn)
vol = img.get_data()
vol = vol.reshape(vol.shape[0:3])
img_3d = nib.Nifti1Image(vol, img.affine)
return img_3d
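# nib_load_3d collapses trailing singleton dimensions, e.g. a (96, 96, 60, 1) volume
# (illustrative shape) is returned as a (96, 96, 60) Nifti1Image with the original affine.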
def cmd(command):
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True, universal_newlines=True)
except subprocess.CalledProcessError as exc:
print("Status : FAIL", exc.returncode, exc.output)
exit(1)
else:
print("Output: \n{}\n".format(output))
def splitext(s):
try :
ssplit = os.path.basename(s).split('.')
ext='.'+'.'.join(ssplit[1:])
basepath= re.sub(ext,'',s)
return [basepath, ext]
except TypeError :
return s
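# Worked example: splitext('/data/scan.nii.gz') returns ['/data/scan', '.nii.gz'], i.e. the full
# double extension is treated as one suffix; non-string input falls into the TypeError branch
# and is returned unchanged.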
def gz(ii, oo):
with open(ii, 'rb') as in_file:
with gzip.open(oo, 'wb') as out_file:
shutil.copyfileobj(in_file, out_file)
def gunzip(ii, oo):
with gzip.open(ii, 'rb') as in_file:
with open(oo, 'wb') as out_file:
shutil.copyfileobj(in_file, out_file)
def check_gz(in_file_fn) :
img, ext = splitext(in_file_fn)
if '.gz' in ext :
out_file_fn = tempfile.mkdtemp() + os.path.basename(img) + '.nii'
sif = img + '.sif'
if os.path.exists(sif) :
shutil.copy(sif, '/tmp/'+os.path.basename(img)+'.sif' )
gunzip(in_file_fn, out_file_fn)
return out_file_fn
else :
return in_file_fn
class separate_mask_labelsOutput(TraitedSpec):
out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsInput(TraitedSpec):
in_file=traits.File(argstr="%s", desc="3D label image")
out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsCommand(BaseInterface ):
input_spec = separate_mask_labelsInput
output_spec = separate_mask_labelsOutput
def _run_interface(self, runtime):
vol = nib.load(self.inputs.in_file)
data = vol.get_data()
data = data.reshape(*data.shape[0:3])
if not isdefined(self.inputs.out_file) :
self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
unique = np.unique( data ).astype(int)
nUnique = len(unique)-1
out = np.zeros( [data.shape[0], data.shape[1], data.shape[2], nUnique] )
print('unique', unique)
print('shape',out.shape)
print('data', data.shape)
for t,i in enumerate( unique ) :
if i != 0 :
print(t-1, i )
out[ data == i, t-1 ] = 1
out_file=nib.Nifti1Image(out, vol.get_affine(), vol.header)
out_file.to_filename(self.inputs.out_file)
return(runtime)
def _gen_outputs(self, fn) :
fn_split = splitext(fn)
return os.getcwd() + os.sep + os.path.basename( fn_split[0] ) + "_4d" + fn_split[1]
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_file) :
self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
outputs["out_file"] = self.inputs.out_file
return outputs
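# Example of the 3D -> 4D expansion above: for an input volume whose labels are {0, 2, 5},
# the output has two binary frames along the 4th axis, frame 0 marking voxels equal to 2 and
# frame 1 those equal to 5; the background label 0 is dropped.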
class concat_dfOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class concat_dfInput(BaseInterfaceInputSpec):
in_list = traits.List(mandatory=True, exists=True, desc="Input list")
out_file = traits.File(mandatory=True, desc="Output file")
test = traits.Bool(default=False, usedefault=True, desc="Flag for if df is part of test run of pipeline")
class concat_df(BaseInterface):
input_spec = concat_dfInput
output_spec = concat_dfOutput
def _run_interface(self, runtime):
df= | pd.DataFrame([]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_cointegration_signal [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_cointegration_signal&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-signals-cointegration).
# +
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from arpym.estimation import cointegration_fp, fit_var1, var2mvou
from arpym.tools import trade_quote_processing, trade_quote_spreading, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_signal-parameters)
delta_a = 10000 # time binning
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_signal-implementation-step00): Load data
# +
path = '../../../databases/global-databases/high-frequency/db_stocks_highfreq/'
AMZN_q = pd.read_csv(path + 'AMZN/quote.csv', index_col=0, parse_dates=True)
AMZN_t = pd.read_csv(path + 'AMZN/trade.csv', index_col=0, parse_dates=True)
GOOG_q = pd.read_csv(path + 'GOOG/quote.csv', index_col=0, parse_dates=True)
GOOG_t = pd.read_csv(path + 'GOOG/trade.csv', index_col=0, parse_dates=True)
# Amazon quotes
t_A = np.array([pd.to_datetime(AMZN_q.index)[i].timestamp() for i
in range(len(AMZN_q.index))])
dates_quotes_A = np.array(pd.to_datetime(AMZN_q.index).date)
q_ask_A = AMZN_q['asksize'].values
p_ask_A = AMZN_q['ask'].values
q_bid_A = AMZN_q['bidsize'].values
p_bid_A = AMZN_q['bid'].values
# Amazon trades
t_q_A = np.array([pd.to_datetime(AMZN_t.index)[i].timestamp() for i
in range(len(AMZN_t.index))])
dates_trades_A = np.array(pd.to_datetime(AMZN_t.index).date)
p_last_A = AMZN_t['price'].values
delta_q_A = AMZN_t['volume'].values
delta_sgn_A = AMZN_t['sign'].values
match_A = AMZN_t['match'].values
# Google quotes
t_G = np.array([ | pd.to_datetime(GOOG_q.index) | pandas.to_datetime |
import os
from scipy.interpolate import interp1d
import numpy as np
import pandas as pd
from sn_design_dd_survey.utils import m5_to_flux5, srand, gamma, load
class SNR_m5:
"""
Class to estimate, for each band and each considered LC
the Signal-to-Noise Ratio vs fiveSigmaDepth
Parameters
---------------
inputDir: str
input directory where the LC file is located
refFile: str
name of the LC file
x1: float
SN stretch
color: float
x1 color
"""
def __init__(self, inputDir, refFile, x1=-2.0, color=0.2):
outfile = 'SNR_m5.npy'
if not os.path.isfile(outfile):
self.process_main(inputDir, refFile, x1, color)
resdf = pd.DataFrame(np.copy(np.load(outfile, allow_pickle=True)))
self.get_m5(resdf)
self.plot(resdf)
def process_main(self, inputDir, refFile, x1, color):
# load the reference file
refdata = pd.DataFrame(np.copy(load(inputDir, refFile)))
refdata['band'] = refdata['band'].map(lambda x: x.decode()[-1])
idc = (refdata['x1']-x1) < 1.e-5
idc &= (refdata['color']-color) < 1.e-5
refdata = refdata[idc]
# load the gamma file
#gamma = self.load('reference_files', 'gamma.hdf5')
# load mag to flux corresp
# mag_to_flux = np.load('reference_files/Mag_to_Flux_SNCosmo.npy')
mag_to_flux = m5_to_flux5('grizy')
# print(mag_to_flux.dtype)
# select exposure time of 30s and
#idx = np.abs(gamma['exptime']-30.) < 1.e-5
#selgamma = gamma[idx]
bands = 'grizy'
# get interpolators for gamma and magflux
gammadict = gamma(bands)
# magfluxdict = {}
"""
for b in bands:
io = selgamma['band'] == b
gammadict[b] = interp1d(
selgamma[io]['mag'], selgamma[io]['gamma'], bounds_error=False, fill_value=0.)
"""
# SNR vs m5 estimation
resdf = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
### State Water Project #######################################################
###############################################################################
### First read, clean, and organize historical SWP delivery data, to be compared with modeled deliveries
# Read data. NOTE: before running this, save the xlsx as csv, then manually fix column names by removing leading/trailing whitespace and replacing spaces between words with _
deliveries = pd.read_csv("calfews_src/data/input/SWP_delivery_data_2000_2018.csv")
# Clean white space, etc
deliveries.To_Reach = deliveries.To_Reach.apply(lambda x: x.replace(' ',''))
deliveries.Turnout = deliveries.Turnout.apply(lambda x: x.replace(' ',''))
deliveries.WT_Group = deliveries.WT_Group.apply(lambda x: x.replace(' ',''))
deliveries.Water_Type = deliveries.Water_Type.apply(lambda x: x.replace(' ',''))
deliveries.Agency_Name = deliveries.Agency_Name.apply(lambda x: x.replace(' ',''))
deliveries.JAN = pd.to_numeric(deliveries.JAN.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.FEB = pd.to_numeric(deliveries.FEB.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.MAR = pd.to_numeric(deliveries.MAR.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.APR = pd.to_numeric(deliveries.APR.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.MAY = pd.to_numeric(deliveries.MAY.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.JUN = pd.to_numeric(deliveries.JUN.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.JUL = pd.to_numeric(deliveries.JUL.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.AUG = pd.to_numeric(deliveries.AUG.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.SEP = pd.to_numeric(deliveries.SEP.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.OCT = pd.to_numeric(deliveries.OCT.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.NOV = pd.to_numeric(deliveries.NOV.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
deliveries.DEC = pd.to_numeric(deliveries.DEC.apply(lambda x: x.replace(' ','').replace('-','0').replace(',','')))
# create time series
swp_historical = pd.DataFrame(index=range(deliveries.shape[0]*12))
swp_historical['Year'] = 0
swp_historical['Month'] = 0
swp_historical['Day'] = 1
swp_historical['Wateryear'] = 0
swp_historical['Bill_To_Agency_Name'] = ' '
swp_historical['Agency_Name'] = ' '
swp_historical['Water_Type'] = ' '
swp_historical['WT_Group'] = ' '
swp_historical['To_Reach'] = ' '
swp_historical['Turnout'] = ' '
swp_historical['delivery_taf'] = 0.0
dum = np.repeat(np.arange(deliveries.shape[0]), 12)
for n in ['Year','Bill_To_Agency_Name','Agency_Name','Water_Type','WT_Group','To_Reach','Turnout']:
swp_historical[n] = deliveries.iloc[dum][n].values
swp_historical['Month'] = np.tile(np.arange(1,13), deliveries.shape[0])
swp_historical['Wateryear'] = swp_historical['Year']
swp_historical['Wateryear'].loc[swp_historical['Month'] > 9] = swp_historical['Wateryear'].loc[swp_historical['Month'] > 9] + 1
swp_historical['delivery_taf'].iloc[0:swp_historical.shape[0]:12] = deliveries['JAN'].values / 1000
swp_historical['delivery_taf'].iloc[1:swp_historical.shape[0]:12] = deliveries['FEB'].values / 1000
swp_historical['delivery_taf'].iloc[2:swp_historical.shape[0]:12] = deliveries['MAR'].values / 1000
swp_historical['delivery_taf'].iloc[3:swp_historical.shape[0]:12] = deliveries['APR'].values / 1000
swp_historical['delivery_taf'].iloc[4:swp_historical.shape[0]:12] = deliveries['MAY'].values / 1000
swp_historical['delivery_taf'].iloc[5:swp_historical.shape[0]:12] = deliveries['JUN'].values / 1000
swp_historical['delivery_taf'].iloc[6:swp_historical.shape[0]:12] = deliveries['JUL'].values / 1000
swp_historical['delivery_taf'].iloc[7:swp_historical.shape[0]:12] = deliveries['AUG'].values / 1000
swp_historical['delivery_taf'].iloc[8:swp_historical.shape[0]:12] = deliveries['SEP'].values / 1000
swp_historical['delivery_taf'].iloc[9:swp_historical.shape[0]:12] = deliveries['OCT'].values / 1000
swp_historical['delivery_taf'].iloc[10:swp_historical.shape[0]:12] = deliveries['NOV'].values / 1000
swp_historical['delivery_taf'].iloc[11:swp_historical.shape[0]:12] = deliveries['DEC'].values / 1000
swp_historical['Date'] = pd.to_datetime(swp_historical[['Year','Month','Day']])
del deliveries
# plt.plot_date(swp_historical['Date'], swp_historical['delivery_taf'], marker='o', alpha=0.3)
# aggregate different groups/reaches together for comparison to model output. See "model.py", initialize_water_districts function, for district 3-letter uppercase abbreviations
swp_historical['Agency_Group'] = 'other'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'DUDLEYRIDGEWATERDISTRICT'] = 'DLR'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'TULARELAKEBASINWSD'] = 'TLB'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'WESTLANDSWATERDISTRICT'] = 'WSL'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'MADERAIRRIGATIONDISTRICT'] = 'MAD'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'KERN-TULAREWATERDISTRICT'] = 'KRT'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'LOWERTULERIVER'] = 'LWT'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'KERNCOUNTYWA'] = 'kcwa'
swp_historical.Agency_Group.loc[swp_historical.Agency_Name == 'FRIANTWATERUSERSAUTHORITY'] = 'fwua'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'BROWNSVALLEYIRRIGATIONDIST') |
(swp_historical.Agency_Name == 'BUTTEWATERDISTRICT') |
(swp_historical.Agency_Name == 'CITYOFYUBACITY') |
(swp_historical.Agency_Name == 'COUNTYOFBUTTE') |
(swp_historical.Agency_Name == 'GARDENHIGHWAYMUTUALWATERCOMPANY') |
(swp_historical.Agency_Name == 'PLACERCOUNTYWATERAGENCY') |
(swp_historical.Agency_Name == 'PLUMASMUTUALWATERCOMPANY') |
(swp_historical.Agency_Name == 'RICHVALEIRRIGATIONDISTRICT') |
(swp_historical.Agency_Name == 'SOUTHFEATHERWATER&POWERAGENCY') |
(swp_historical.Agency_Name == 'SOUTHSUTTERWATERDISTRICT') |
(swp_historical.Agency_Name == 'SUTTEREXTENSIONWATERDISTRICT') |
(swp_historical.Agency_Name == 'THERMALITOWATERANDSEWERDISTRICT') |
(swp_historical.Agency_Name == 'WESTERNCANALWATERDISTRICT')] = 'sacramentoriver'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'NAPACOUNTYFC&WCD') |
(swp_historical.Agency_Name == 'SOLANOCOUNTYWATERAGENCY')] = 'northbay'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'ALAMEDACOUNTYFC&WCD-ZONE7') |
(swp_historical.Agency_Name == 'TRI-VALLEYWATERDISTRICT') |
(swp_historical.Agency_Name == 'ALAMEDACOUNTYWD')] = 'southbay'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'CITYOFTRACY')] = 'delta'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'DELPUERTOWATERDISTRICT') |
(swp_historical.Agency_Name == 'OAKFLATWATERDISTRICT') |
(swp_historical.Agency_Name == 'SANLUISWATERDISTRICT') |
(swp_historical.Agency_Name == 'SanLuis&DeltaMendotaWaterAuth') |
(swp_historical.Agency_Name == 'TRANQUILLITYIRRIGATIONDISTRICT')] = 'deltamendotacanal'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'PACHECOPASSWATERDISTRICT') |
(swp_historical.Agency_Name == 'SANBENITOWATERDISTRICT') |
(swp_historical.Agency_Name == 'SANTACLARAVALLEYWD')] = 'pachecotunnel'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'SANLUISOBISPOCOUNTYFC&WCD') |
(swp_historical.Agency_Name == 'SANTABARBARACOUNTYFC&WCD')] = 'centralcoast'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'HILLSVALLEYIRRIGATIONDISTRICT')] = 'other_maderacanal'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'CITYOFDOSPALOS') |
(swp_historical.Agency_Name == 'MERCEDIRRIGATIONDISTRICT')] = 'other_mercedco'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'BROADVIEWWATERDISTRICT') |
(swp_historical.Agency_Name == 'CITYOFCOALINGA') |
(swp_historical.Agency_Name == 'CITYOFHURON') |
(swp_historical.Agency_Name == 'COUNTYOFFRESNO') |
(swp_historical.Agency_Name == 'PANOCHEWATERDISTRICT')] = 'other_fresnoco'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'AVENAL,CITYOF') |
(swp_historical.Agency_Name == 'AvenalStatePrison') |
(swp_historical.Agency_Name == 'COUNTYOFKINGS') |
(swp_historical.Agency_Name == 'EMPIREWESTSIDEID')] = 'other_kingsco'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'COUNTYOFTULARE') |
(swp_historical.Agency_Name == 'PIXLEYIRRIGATIONDISTRICT')] = 'other_tulareco'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'RAGGULCHWATERDISTRICT')] = 'other_kernco'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'ANTELOPEVALLEY-EASTKERNWA') |
(swp_historical.Agency_Name == 'CASTAICLAKEWA') |
(swp_historical.Agency_Name == 'COACHELLAVALLEYWD') |
(swp_historical.Agency_Name == 'CRESTLINE-LAKEARROWHEADWA') |
(swp_historical.Agency_Name == 'DESERTWATERAGENCY') |
(swp_historical.Agency_Name == 'LITTLEROCKCREEKID') |
(swp_historical.Agency_Name == 'MOJAVEWATERAGENCY') |
(swp_historical.Agency_Name == 'PALMDALEWATERDISTRICT') |
(swp_historical.Agency_Name == 'SANBERNARDINOVALLEYMWD') |
(swp_historical.Agency_Name == 'SANGABRIELVALLEYMWD') |
(swp_historical.Agency_Name == 'SANGORGONIOPASSWA') |
(swp_historical.Agency_Name == 'THEMETROPOLITANWATERDISTRICTOF') |
(swp_historical.Agency_Name == 'VENTURACOUNTYWPD')] = 'socal'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Name == 'CADEPTOFFISHANDGAME') |
(swp_historical.Agency_Name == 'CADEPTOFPARKSANDRECREATION') |
(swp_historical.Agency_Name == 'CADEPTOFWATERRESOURCES') |
(swp_historical.Agency_Name == 'EWA-STATE') |
(swp_historical.Agency_Name == 'KERNNATIONALWILDLIFEREFUGE') |
(swp_historical.Agency_Name == "SANJOAQUINVALLEYNAT'LCEMETERY") |
(swp_historical.Agency_Name == 'USBUREAUOFRECLAMATION')] = 'statefederal'
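# Mapping note: the block of .loc assignments above is equivalent to a single
# dictionary lookup; a sketch using a few of the entries already mapped above
# (not part of the original script):
# agency_to_group = {'DUDLEYRIDGEWATERDISTRICT': 'DLR',
#                    'TULARELAKEBASINWSD': 'TLB',
#                    'WESTLANDSWATERDISTRICT': 'WSL'}
# swp_historical['Agency_Group'] = swp_historical['Agency_Name'].map(agency_to_group).fillna('other')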
# now split up KCWA contractors using Reach numbers from Table 22 of State Water Project Operations Data monthly report (Dec 1998 and Jun 2017)
# R8D is either DLR or TLB or County of Kings according to the pdf, but the only one here says DLR for Bill_To_Agency_Name
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R8D')] = 'LHL'
# R9 Lost Hills or Berrenda Mesa
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R9')] = 'LHL'
# R10A - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R10A') &
((swp_historical.Turnout == 'T219') |
(swp_historical.Turnout == 'T220') |
(swp_historical.Turnout == 'T221'))] = 'LHL'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R10A') &
((swp_historical.Turnout == 'T395') |
(swp_historical.Turnout == 'T225'))] = 'SMI'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R10A') &
(swp_historical.Turnout == 'T224')] = 'BLR'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R10A') &
(swp_historical.Turnout == 'T228')] = 'BVA_statefederal'
# R11B always Belridge
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R11B')] = 'BLR'
# R12D - only turnout can be Belridge or West Kern
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R12D')] = 'BLR_WKN'
# R12E - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R12E') &
((swp_historical.Turnout == 'T235') |
(swp_historical.Turnout == 'T236') |
(swp_historical.Turnout == 'T433'))] = 'BVA'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R12E')] = 'CVC-KCWA_CVC-DLR_CVC-BVA_ARV_LWT_TUL_COF_pixley_haciendaDWR'
# R13B - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R13B') &
((swp_historical.Turnout == 'T242') |
(swp_historical.Turnout == 'T245'))] = 'BVA_HML'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R13B') &
(swp_historical.Turnout == 'T244')] = 'KWB'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R13B') &
(swp_historical.Turnout == 'T241')] = 'BVA'
# R14A - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R14A') &
((swp_historical.Turnout == 'T247') |
(swp_historical.Turnout == 'T248') |
(swp_historical.Turnout == 'T249'))] = 'WRM'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R14A')] = 'WKN'
# R14B always Wheeler Ridge-Maricopa
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R14B')] = 'WRM'
# R14C - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R14C') &
((swp_historical.Turnout == 'T253') |
(swp_historical.Turnout == 'T254'))] = 'WRM'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R14C') &
(swp_historical.Turnout == 'T255')] = 'ARV'
# R15A WRM
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R15A')] = 'WRM'
# R16A Wheeler Ridge-Maricopa or Tehachapi Cummings
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R16A')] = 'WRM_THC'
# R16A - divide by turnout (see SWP_turnouts.csv)
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R16A') &
(swp_historical.Turnout == 'T265')] = 'THC'
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R16A')] = 'WRM'
# R31A is start of Coastal Branch - berrenda mesa is only kcwa
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCB1-R31A')] = 'BDM'
# R17E is start of East Branch after Edmonston pumping plant - Tejon-Castac has delivery
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa') &
(swp_historical.To_Reach == 'VCA-R17E')] = 'TJC'
# Rest are unknown - don't see any kcwa members in pdfs. Reaches VCA-R3A, VCA-R4, VCB2-R33A.
swp_historical.Agency_Group.loc[(swp_historical.Agency_Group == 'kcwa')] = 'kcwa_unknown'
# now aggregate by water types into categories used by ORCA (See SWP_waterTypes_Lookup.csv)
swp_historical['WT_model'] = 'other'
swp_historical['banking_partner'] = 'none'
# TableA delivery
swp_historical.WT_model.loc[(swp_historical.Water_Type == 'TBLA01') |
(swp_historical.Water_Type == 'TBLA02') |
(swp_historical.Water_Type == 'TBLA07') |
(swp_historical.Water_Type == 'TBLA08') |
(swp_historical.Water_Type == 'TBLAADV') |
(swp_historical.WT_Group == 'TableA')
] = 'tableA_delivery'
# TableA flood (Article 21)
swp_historical.WT_model.loc[swp_historical.WT_Group == 'Article21'] = 'tableA_flood'
# TableA turnback - also set sign -1 for sold water
swp_historical.WT_model.loc[(swp_historical.WT_Group == 'Bought') |
(swp_historical.WT_Group == 'Sold-CO') |
(swp_historical.WT_Group == 'Sold-TBLA') |
(swp_historical.WT_Group == 'TurnbackPools')
] = 'tableA_turnback'
swp_historical.delivery_taf.loc[(swp_historical.WT_Group == 'Sold-CO') | (swp_historical.WT_Group == 'Sold-TBLA')] = \
-swp_historical.delivery_taf.loc[(swp_historical.WT_Group == 'Sold-CO') | (swp_historical.WT_Group == 'Sold-TBLA')]
# TableA carryover
swp_historical.WT_model.loc[(swp_historical.WT_Group == 'Carryover') |
(swp_historical.WT_Group == 'SLRStorageCarryover')
] = 'tableA_carryover'
# cvc delivery
swp_historical.WT_model.loc[(swp_historical.Water_Type == 'CVCSPLY') |
(swp_historical.Water_Type == 'JNTPNTCVC') |
(swp_historical.Water_Type == 'CVCENT') |
(swp_historical.Water_Type == 'CVC-OVER') |
(swp_historical.Water_Type == 'CVCPOD') |
(swp_historical.Water_Type == 'CVCTRN') |
(swp_historical.Water_Type == 'OPERXCH') |
((swp_historical.Water_Type == 'CVPSPLY') &
((swp_historical.Agency_Name == 'KERNCOUNTYWA') |
(swp_historical.Agency_Name == 'COUNTYOFFRESNO') |
(swp_historical.Agency_Name == 'COUNTYOFTULARE') |
(swp_historical.Agency_Name == 'TULARELAKEBASINWSD') |
(swp_historical.Agency_Name == 'HILLSVALLEYIRRIGATIONDISTRICT') |
(swp_historical.Agency_Name == 'TRI-VALLEYWATERDISTRICT') |
(swp_historical.Agency_Name == 'LOWERTULERIVER')))
] = 'cvc_delivery'
# cvc flood
swp_historical.WT_model.loc[swp_historical.WT_Group == 'CVC215'] = 'cvc_flood'
# cvc undelivered
swp_historical.WT_model.loc[swp_historical.WT_Group == 'CVCREMAIN'] = 'cvc_undelivered'
# cvpdelta delivery
swp_historical.WT_model.loc[(swp_historical.Water_Type == 'CVPPOD') |
(swp_historical.Water_Type == 'DCVCCN') |
(swp_historical.Water_Type == 'JNTPNT') |
((swp_historical.Water_Type == 'CVPSPLY') &
((swp_historical.Agency_Name == 'SANTACLARAVALLEYWD') |
(swp_historical.Agency_Name == 'USBUREAUOFRECLAMATION')|
(swp_historical.Agency_Name == 'THEMETROPOLITANWATERDISTRICTOF')))
] = 'cvpdelta_delivery'
# banking recovery. Also get banking partner from key.
swp_historical.WT_model.loc[(swp_historical.Water_Type == 'CXAERV') |
(swp_historical.Water_Type == 'CXKBRV') |
(swp_historical.Water_Type == 'CXSTRV') |
(swp_historical.Water_Type == 'CXKDRV') |
(swp_historical.WT_Group == 'WaterBankPumpin') |
(swp_historical.WT_Group == 'WaterBankRecovery')
] = 'recover_banked'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'CXAERV'] = 'AEMWD'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'CXKBRV'] = 'KWB'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'CXSTRV'] = 'SMI'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'CXKDRV'] = 'KND'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'PUMPINAE'] = 'AEMWD'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'PUMPINKD'] = 'KND'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'PUMPINKWB'] = 'KWB'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'PUMPINST'] = 'SMI'
swp_historical.banking_partner.loc[swp_historical.WT_Group == '78RCV'] = 'SOC'
swp_historical.banking_partner.loc[swp_historical.WT_Group == '82RCV'] = 'SOC'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'KWBRCV'] = 'KWB'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'STEWA'] = 'SMI'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'STEWA-X'] = 'SMI'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'STRCV'] = 'SMI'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'STRCV-X'] = 'SMI'
# exchanged Table A surface water. Agency gets delivery of water from banking partner, gives partner paper TBLA credit.
swp_historical.WT_model.loc[(swp_historical.Water_Type == 'TBLAXAE') |
(swp_historical.Water_Type == 'TBLAXKD') |
(swp_historical.Water_Type == 'TBLAXKWB') |
(swp_historical.Water_Type == 'TBLAXST') |
(swp_historical.Water_Type == 'TBLAXSPLY')
] = 'exchange_SW'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'TBLAXAE'] = 'AEMWD'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'TBLAXKD'] = 'KND'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'TBLAXKWB'] = 'KWB'
swp_historical.banking_partner.loc[swp_historical.WT_Group == 'TBLAXST'] = 'SMI'
# create indicator variable to decide whether the record should be included in the total deliveries count
swp_historical['is_delivery'] = 0
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'tableA_delivery'] = 1
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'tableA_flood'] = 1
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'cvc_delivery'] = 1
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'cvc_flood'] = 1
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'cvpdelta_delivery'] = 1
swp_historical['is_delivery'].loc[swp_historical.WT_model == 'recover_banked'] = 1
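# Equivalent vectorized form of the six indicator assignments above (sketch only):
# delivery_types = ['tableA_delivery', 'tableA_flood', 'cvc_delivery',
#                   'cvc_flood', 'cvpdelta_delivery', 'recover_banked']
# swp_historical['is_delivery'] = swp_historical.WT_model.isin(delivery_types).astype(int)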
swp_historical['Project'] = 'SWP'
# ### plot historical deliveries
# dum = (swp_historical.To_Reach == 'VCA-R12E') & (swp_historical.delivery_taf > 0)
# dum2 = dum & (swp_historical.WT_model == 'tableA_delivery')
# dum3 = dum & (swp_historical.WT_model == 'tableA_flood')
# dum4 = dum & (swp_historical.WT_model == 'tableA_turnback')
# dum5 = dum & (swp_historical.WT_model == 'tableA_carryover')
# dum6 = dum & (swp_historical.WT_model == 'recover_banked')
# dum7 = dum & (swp_historical.WT_model == 'exchange_SW')
# dum8 = dum & (swp_historical.WT_model == 'other')
#
# plt.plot_date(swp_historical['Date'].loc[dum2], swp_historical['delivery_taf'].loc[dum2], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum3], swp_historical['delivery_taf'].loc[dum3], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum4], swp_historical['delivery_taf'].loc[dum4], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum5], swp_historical['delivery_taf'].loc[dum5], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum6], swp_historical['delivery_taf'].loc[dum6], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum7], swp_historical['delivery_taf'].loc[dum7], marker='o', alpha=0.3)
# plt.plot_date(swp_historical['Date'].loc[dum8], swp_historical['delivery_taf'].loc[dum8], marker='o', alpha=0.3)
# plt.legend(['delivery','flood','turnback','carryover','recovery','exchange_SW', 'other'])
### save
swp_historical.to_csv('calfews_src/data/input/SWP_delivery_cleaned.csv',index=False)
###############################################################################
### Central Valley Project ####################################################
###############################################################################
### Read/clean/organize historical CVP deliveries
# Read data, 1996-2016. **Note: for 2011-2016, the pdfs were converted to csv online, then the csv files were cleaned in Excel to get them into readable form.
# For 1996-2010, the text tables were downloaded and cleaned/converted to csv in Excel.
years = np.arange(1996,2017)
tables = [22,23,26]
deliveries = pd.DataFrame({'WaterUser':[],'Canal':[],'Year':[],'Jan':[],'Feb':[],'Mar':[],'Apr':[],'May':[],'Jun':[],
'Jul':[],'Aug':[],'Sep':[],'Oct':[],'Nov':[],'Dec':[], 'Notes':[]})
for y in years:
for t in tables:
file = 'calfews_src/data/input/cvp_historical/table_' + str(t) + '_' + str(y) + '.csv'
try:
deliveries_temp = pd.read_csv(file, skiprows=6)
except:
file = 'calfews_src/data/input/cvp_historical/table_' + str(t) + '_' + str(y) + '.xlsx'
deliveries_temp = pd.read_excel(file, skiprows=6)
from typing import List
import pandas as pd
import numpy as np
from .engineobj import SqPandasEngine
from suzieq.utils import convert_macaddr_format_to_colon
class NetworkObj(SqPandasEngine):
@staticmethod
def table_name():
return 'network'
def get(self, **kwargs):
"""Get the information requested"""
view = kwargs.get('view', self.iobj.view)
columns = kwargs.pop('columns', ['default'])
addnl_fields = kwargs.pop('addnl_fields', [])
user_query = kwargs.pop('query_str', '')
os = kwargs.pop('os', [])
model = kwargs.pop('model', [])
vendor = kwargs.pop('vendor', [])
os_version = kwargs.pop('version', [])
namespace = kwargs.pop('namespace', [])
drop_cols = []
if os or model or vendor or os_version:
df = self._get_table_sqobj('device').get(
columns=['namespace', 'hostname', 'os', 'model', 'vendor',
'version'],
os=os,
model=model,
vendor=vendor,
version=os_version,
namespace=namespace,
**kwargs)
else:
df = self._get_table_sqobj('device').get(
columns=['namespace', 'hostname'], namespace=namespace,
**kwargs)
if df.empty:
return pd.DataFrame()
namespace = df.namespace.unique().tolist()
hosts = df.hostname.unique().tolist()
dev_nsgrp = df.groupby(['namespace'])
# Get list of namespaces we're polling
pollerdf = self._get_table_sqobj('sqPoller') \
.get(columns=['namespace', 'hostname', 'service', 'status', 'timestamp'],
namespace=namespace, hostname=hosts)
if pollerdf.empty:
return pd.DataFrame()
nsgrp = pollerdf.groupby(by=['namespace'])
pollerns = sorted(pollerdf.namespace.unique().tolist())
newdf = pd.DataFrame({
'namespace': pollerns,
'deviceCnt': dev_nsgrp['hostname'].nunique().tolist(),
'serviceCnt': nsgrp['service'].nunique().tolist()
})
errsvc_df = pollerdf.query('status != 0 and status != 200') \
.groupby(by=['namespace'])['service'] \
.nunique().reset_index()
newdf = newdf.merge(errsvc_df, on=['namespace'], how='left',
suffixes=['', '_y']) \
.rename({'service': 'errSvcCnt'}, axis=1) \
.fillna({'errSvcCnt': 0})
newdf['errSvcCnt'] = newdf['errSvcCnt'].astype(int)
# What protocols exist
for table, fld in [('ospf', 'hasOspf'), ('bgp', 'hasBgp'),
('evpnVni', 'hasVxlan'), ('mlag', 'hasMlag')]:
df = self._get_table_sqobj(table) \
.get(namespace=pollerns, hostname=hosts,
columns=['namespace'])
if df.empty:
newdf[fld] = False
continue
gotns = df.namespace.unique().tolist()
newdf[fld] = newdf.apply(
lambda x, y: True if x.namespace in y else False,
axis=1, args=(gotns,))
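# Note: the row-wise apply above could also be written as a vectorized
# membership test, e.g. newdf[fld] = newdf['namespace'].isin(gotns)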
newdf = self._handle_user_query_str(newdf, user_query)
# Look for the rest of info only in selected namespaces
newdf['lastUpdate'] = nsgrp['timestamp'].max() \
.reset_index()['timestamp']
return newdf.drop(columns=drop_cols)
def find(self, **kwargs):
'''Find the information requested:
address: given a MAC or IP address, find the first-hop switch it's
connected to
'''
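# Illustrative call (hypothetical addresses, not part of the original code):
# obj.find(address=['10.0.0.1', '44:39:39:ff:40:94']) returns, for each
# address, the first-hop switch rows described above.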
addrlist = kwargs.pop('address', [])
columns = kwargs.pop('columns', ['default'])
query_str = kwargs.pop('query_str', '')
dflist = []
if isinstance(addrlist, str):
addrlist = [addrlist]
for addr in addrlist:
df = self._find_address(addr, **kwargs)
if not df.empty:
dflist.append(df)
if dflist:
df = pd.concat(dflist)
import pandas as pd, numpy as np, json
import clubs_loader
nyears=8
def get_members(path):
members=pd.read_excel(path,header=[1])
members=members[[231, 'Nr. EKF',
'Club', 'Unnamed: 3',
'Numele', 'Prenumele',
'Gen', 'Data naşterii',
'1 kyu','practică',
'1 dan', '2 dan',
'3 dan', '4 dan',
'5 dan', '6 dan',
'7 dan', '8 dan',
151,
152, '152.1',
175, 179,
197,214,'231.1']]
members.columns=list(members.columns[:-nyears])+list(range(2019-nyears,2019))
return members
def get_transfer(name,tf,verbose=False):
if tf==[]:
return tf
else:
to_blank=[' ','(',')','Transfer:','?','/']
to_replace={'Hungary':'HUN'}
to_year={'Gușu Rebeca':'2010'}
def get_tf_clubs(z):
for t in range(len(to_blank)):
z=z.replace(to_blank[t],'')
for t in to_replace:
z=z.replace(t,to_replace[t])
if ('=>') in z:
from_to=z.find('=>')
to_return={'from':z[from_to-3:from_to],'to':z[from_to+2:from_to+5],'time':z[-4:]}
if verbose:
to_return['orig']=z
else:
print('error with transfer',z,)
to_return=z
##check years
#infer year from wrong note order
if '20' not in to_return['time']:
if '20' in z:
to_return['time']=z[z.find('20'):z.find('20')+4]
#if still not inferred, then manual fix
if '20' not in to_return['time']:
to_return['time']=to_year[name]
to_return['time']=int(to_return['time'])
return to_return
transfers=str(tf).split('\n')
tfr=[]
for i in transfers:
if not i in ('','nan'):
tfr.append(get_tf_clubs(i))
return sorted(tfr, key=lambda k: k['time'])
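# Illustrative example of the transfer-note format parsed above (hypothetical
# club codes; the real notes come from the 'Unnamed: 3' spreadsheet column):
# get_transfer('Doe John', 'ABC => DEF (Transfer: 2015)')
# # -> [{'from': 'ABC', 'to': 'DEF', 'time': 2015}]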
def cleaner(members):
data={}
replace_active={'Activ':'Active','Inactiv':'Inactive','Free':'Inactive','AS':'Abroad',
'Transferat':'Abroad','Decedat':'Inactive'}
active_redflags=['Deleted']
for i in members.T.iteritems():
active=i[1][231]
if active not in active_redflags:
grades=i[1][['1 kyu','1 dan','2 dan','3 dan','4 dan','5 dan',
'6 dan','7 dan','8 dan']].replace('x',pd.NaT).dropna()
grades0=i[1][['1 dan','2 dan','3 dan','4 dan','5 dan',
'6 dan','7 dan','8 dan']].replace('x',pd.NaT).dropna()
df=pd.DataFrame(grades0)
df.columns=['dan']
df=df.reset_index().set_index('dan').sort_index()
dummy={}
grades=pd.to_datetime(grades.astype(str))
active=replace_active[active]
if len(grades)>0:
mingrade=grades.min().year
maxgrade=grades.max().year
else:
mingrade=2016 #default starting year
maxgrade=2016
if active=='Active':
maxyear=2019 #default active max year
else:
maxyear=min(maxgrade+4,2019) #default active years grace period, if unknown
dummy['name']=i[1]['Numele']+' '+i[1]['Prenumele']
dummy['birth']=str(i[1]['Data naşterii'])[:10]
dummy['gen']=i[1]['Gen']
dummy['ekf']=i[1]['Nr. EKF']
club=i[1]['Club']
dummy['transfer']=get_transfer(dummy['name'],i[1]['Unnamed: 3'])
for year in range(mingrade,maxyear):
if year==maxyear-1:
dummy['active']=active
else:
dummy['active']='Active'
#get year from exams
dummy['dan']=len(df[:str(year)])
#get active from member list
for j in i[1][-nyears:].index:
if year==j:
if i[1][-nyears:][j]=='Da':
dummy['active']=active
else:
dummy['active']='Inactive'
#get club from transfers
clubs=clubs_loader.get_club_by_year(dummy['transfer'],club,year,mingrade,maxyear)
clubs=clubs[:1] #keep only the first club in transfer years (remove this step to double count)
for j in range(len(clubs)):
iclub=clubs_loader.replacer(clubs[j])
dummy['club']=iclub
dummy['age']=year-1-pd.to_datetime(dummy['birth']).year
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import math
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0)
matrix_aa = pd.read_csv('data/axon-axon.csv', header=0, index_col=0)
matrix_da = pd.read_csv('data/dendrite-axon.csv', header=0, index_col=0)
matrix = matrix_ad + matrix_dd + matrix_aa + matrix_da
# the columns are string by default and the indices int; now both are int
matrix_ad.columns = pd.to_numeric(matrix_ad.columns)
matrix_dd.columns = pd.to_numeric(matrix_dd.columns)
import numpy as np
import cv2 as cv
import sys
import pandas as pd
import os
#img = cv.imread(r'C:\Users\shubh\Desktop\sample_images\Malignant'+'\\'+'M_3.jpg')
#imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#imgResize = cv.resize(img, (512,512))
def main():
start = None
end = None
print(start, end)
if start == None:
print("Start not specified, setting to 1")
start = 1
if end == None:
print("End not specified, setting to 5")
end = 1000
if start > end:
print("Start > End")
sys.exit(0)
opArr4 = []
opArr8 = []
opArr16 = []
apArr_32 = []
apArr_64 = []
for x in ("Benign", "Malignant"):
print(x)
if x == "Benign":
fName = "B_"
if x == "Malignant":
fName = "M_"
for f in range(start,end+1):
print(x + str(f))
img = cv.imread(r"C:\Users\shubh\Desktop\BE PROJECT\BE seminar\datasets\ISIC_Complete\\" + x + "\Images\\" + fName + str(f) + ".jpg")
img = cv.resize(img, (512,512))
fNameDCT = fName + str(f)
DctTransform_4, DctTransform_8, DctTransform_16, DctTransform_32, DctTransform_64, reducedImg = dct_transform(img, fNameDCT, x)
opArr4.append(DctTransform_4)
opArr8.append(DctTransform_8)
opArr16.append(DctTransform_16)
apArr_32.append(DctTransform_32)
apArr_64.append(DctTransform_64)
pd.DataFrame(opArr4).to_csv("opDCT_4", mode="a", header=True, index = False)
pd.DataFrame(opArr8).to_csv("opDCT_8", mode="a", header=True, index = False)
pd.DataFrame(opArr16).to_csv("opDCT_16", mode="a", header=True, index = False)
import numpy as np
from sklearn.grid_search import ParameterGrid
from ga.simulator import Simulator
from tsp_generator import TSPGenerator
import pandas as pd
class GeneticAlgorithmParameterEstimation():
def __init__(self, num_datasets, dataset_size):
"""Creates a new GA parameter tuner.
This class can be used to tune the parameters of a GA by testing the
performance across a range of different parameter settings.
:param num_datasets: the number of datasets to generate
:param dataset_size: the size of the dataset to generate
"""
self._num_datasets = num_datasets
self._generator = TSPGenerator(dataset_size)
def perform_grid_search(self, params):
"""Perform a grid search over the range of parameters provided
This will expand the ranges specified in the params argument into every
combination of parameter settings using scikit-learn's ParameterGrid
function.
Each set of parameters is tested against multiple randomly
generated datasets (of the same size). The average fitness achieved
over all test datasets is taken as the measure of quality for the
parameters.
:param params: dictionary of ranges of parameters to be passed to ParameterGrid
:return: dictionary of the best parameter settings found
:rtype: dict
"""
param_grid = list(ParameterGrid(params))
self._param_fitness = []
datasets = [self._generator.generate() for _ in range(self._num_datasets)]
param_data = pd.DataFrame.from_dict(param_grid)
import datetime
import string
import matplotlib.dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from nltk import WordNetLemmatizer, LancasterStemmer, pos_tag, sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from pandas.tseries.offsets import BDay
from sklearn import tree
from sklearn.calibration import calibration_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVC
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, plot_tree
from textblob import TextBlob
from wordcloud import WordCloud
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
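# This helper mirrors the scikit-learn learning-curve example: it draws three
# panels, the learning curve (score vs. number of training examples), model
# scalability (fit time vs. training examples), and model performance
# (score vs. fit time).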
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ =\
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
axes[0].grid()
axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes[0].legend(loc="best")
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, 'o-')
axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std, alpha=0.1)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
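# Example usage of plot_learning_curve (sketch; X and y are assumed to be an
# already-prepared feature matrix and label vector):
# plot_learning_curve(GaussianNB(), "Learning curve (Naive Bayes)", X, y, cv=5, n_jobs=4)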
def create_word_cloud(text, type):
print('\nCreating word cloud...')
word_cloud = WordCloud(width=1024, height=1024, margin=0).generate(text)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(word_cloud, interpolation='bilinear')
ax.axis("off")
ax.margins(x=0, y=0)
plt.savefig(f'wordcloud_{type}.png')
def get_stop_words(tokens):
stop_word_tokens = []
for word in tokens:
if word.startswith('//t.co/') or word.startswith('http') or word in ['RT', 'http', 'rt', 'timestamp',
'.', '[video]', 'AMP', 'and', 'at',
'for', 'from', 'the', 'this', 'is',
'it', 'jul', 'of', 'on', 'to', 'in',
'with', 2018, 'FALSE', '2018', 'amp',
'you', 'by', False, 0, 7, 12, 15,
'0', '7', '12', '15', 'inc']:
continue
elif word not in stopwords.words('english') and word not in ['RT', 'http', 'rt', 'timestamp', '.', '[video]']:
stop_word_tokens.append(word)
sentence = ' '.join(stop_word_tokens)
return sentence
def get_lemma(tokens):
lemma = WordNetLemmatizer()
lemmatized_tokens = []
for token in tokens:
temp_tokens = lemma.lemmatize(token)
lemmatized_tokens.append(temp_tokens)
return get_stop_words(lemmatized_tokens)
def get_stems(tokens):
stemmer = LancasterStemmer()
stemmed_tokens = []
for token in tokens:
for word in token:
if word[1] == 'DT' or word[1] == 'PRP' or word[1] == 'PRP$' or word[1] == 'NN' or word[1] == 'NNP' or word[1] == 'NNPS':
temp_tokens = word[0]
else:
temp_tokens = stemmer.stem(word[0])
stemmed_tokens.append(temp_tokens)
return get_lemma(stemmed_tokens)
def get_pos_tag(tokens):
pos_tokens = [pos_tag(token) for token in tokens]
return get_stems(pos_tokens)
def get_tokens(document):
sequences = sent_tokenize(document)
seq_tokens = [word_tokenize(sequence) for sequence in sequences]
no_punctuation_seq_tokens = []
for seq_token in seq_tokens:
no_punctuation_seq_tokens.append([token for token in seq_token if token not in string.punctuation])
return get_pos_tag(no_punctuation_seq_tokens)
def get_num_words(s):
return len(s.split())
def append_col(train_data):
print('\nGetting number of words in new text cells...')
word_counts = []
for index, row in train_data.iterrows():
word_counts.append(get_num_words(row['new_text']))
train_data['new_text_count'] = word_counts
return train_data
def get_bigrams(train_data):
print("\nCalculating the bigrams...")
bigram_vectorizer = CountVectorizer(ngram_range=[2, 2])
x = bigram_vectorizer.fit_transform(train_data.text)
bigram_total = bigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bigrams = pd.DataFrame(mat.todense(), index=train_data.index, columns=bigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, bigrams], ignore_index=False, sort=False, axis=1, join="inner")
return len(bigram_total), train_data
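# Note: CountVectorizer(ngram_range=[2, 2]) followed by TfidfTransformer, as used
# above, is equivalent to TfidfVectorizer(ngram_range=(2, 2)) from
# sklearn.feature_extraction.text, which computes the tf-idf weighted bigrams in
# one step.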
def get_trigrams(train_data):
print("\nCalculating the trigrams...")
trigram_vectorizer = CountVectorizer(ngram_range=[3, 3])
x = trigram_vectorizer.fit_transform(train_data.text)
trigram_total = trigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
trigram = pd.DataFrame(mat.todense(), index=train_data.index, columns=trigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, trigram], ignore_index=False, sort=False, axis=1, join="inner")
return len(trigram_total), train_data
def get_bag_of_words(train_data, features, name, type):
print("\nCalculating the bag of words...")
vectorizer = CountVectorizer(max_features=features, stop_words='english')
x = vectorizer.fit_transform(train_data.text)
words = vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bow = pd.DataFrame(mat.todense(), index=train_data.index, columns=vectorizer.get_feature_names())
train_data = pd.concat([train_data, bow], ignore_index=False, sort=False, axis=1, join="inner")
# coding=utf-8
import datetime as dt
import os
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from moto import mock_s3
from scipy import sparse
from sparsity import SparseFrame, sparse_one_hot
from sparsity.io_ import _csr_to_dict
from .conftest import tmpdir
@contextmanager
def mock_s3_fs(bucket, data=None):
"""Mocks an s3 bucket
Parameters
----------
bucket: str
bucket name
data: dict
dictionary with paths relative to bucket and
bytestrings as values. Will mock data in bucket
if supplied.
Returns
-------
"""
try:
m = mock_s3()
m.start()
import boto3
import s3fs
client = boto3.client('s3', region_name='eu-west-1')
client.create_bucket(Bucket=bucket)
if data is not None:
data = data.copy()
for key, value in data.items():
client.put_object(Bucket=bucket, Key=key, Body=value)
yield
finally:
if data is not None:
for key in data.keys():
client.delete_object(Bucket=bucket, Key=key)
m.stop()
def test_empty_init():
sf = SparseFrame(np.array([]), index=[], columns=['A', 'B'])
assert sf.data.shape == (0, 2)
sf = SparseFrame(np.array([]), index=['A', 'B'], columns=[])
assert sf.data.shape == (2, 0)
def test_empty_column_access():
sf = SparseFrame(np.array([]), index=[], columns=['A', 'B', 'C', 'D'])
assert sf['D'].data.shape == (0, 1)
def test_groupby(groupby_frame):
t = groupby_frame
res = t.groupby_sum().data.todense()
assert np.all(res == (np.identity(10) * 10))
def test_groupby_dense_random_data():
shuffle_idx = np.random.permutation(np.arange(100))
index = np.tile(np.arange(10), 10)
single_tile = np.random.rand(10, 10)
data = np.vstack([single_tile for _ in range(10)])
t = SparseFrame(data[shuffle_idx, :], index=index[shuffle_idx])
res = t.groupby_sum().data.todense()
np.testing.assert_array_almost_equal(res, (single_tile * 10))
def test_simple_join():
t = SparseFrame(np.identity(10))
res1 = t.join(t, axis=0).data.todense()
correct = np.vstack([np.identity(10), np.identity(10)])
assert np.all(res1 == correct)
res2 = t.join(t, axis=1).data.todense()
correct = np.hstack([np.identity(10), np.identity(10)])
assert np.all(res2 == correct)
def test_complex_join(complex_example):
first, second, third = complex_example
correct = pd.DataFrame(first.data.todense(),
index=first.index,
columns=map(str, range(len(first.columns)))) \
.join(pd.DataFrame(second.data.todense(),
index=second.index,
columns=map(str, range(len(second.columns)))),
how='left',
rsuffix='_second') \
.join(pd.DataFrame(third.data.todense(),
index=third.index,
columns=map(str, range(len(third.columns)))),
how='left',
rsuffix='_third') \
.sort_index().fillna(0)
res = first.join(second, axis=1).join(third, axis=1) \
.sort_index().data.todense()
assert np.all(correct.values == res)
# res = right.join(left, axis=1).data.todense()
# assert np.all(correct == res)
def test_mutually_exclusive_join():
correct = np.vstack([np.hstack([np.identity(5), np.zeros((5, 5))]),
np.hstack([np.zeros((5, 5)), np.identity(5)])])
left_ax1 = SparseFrame(np.identity(5), index=np.arange(5))
right_ax1 = SparseFrame(np.identity(5), index=np.arange(5, 10))
res_ax1 = left_ax1.join(right_ax1, axis=1)
left_ax0 = SparseFrame(np.identity(5), columns=np.arange(5))
right_ax0 = SparseFrame(np.identity(5), columns=np.arange(5, 10))
res_ax0 = left_ax0.join(right_ax0, axis=0)
assert np.all(res_ax0.data.todense() == correct), \
"Joining along axis 0 failed."
assert np.all(res_ax1.data.todense() == correct), \
"Joining along axis 1 failed."
def test__array___():
correct = np.identity(5)
sf = SparseFrame(correct, index=list('ABCDE'),
columns=list('ABCDE'))
res = np.asarray(sf)
assert np.all(res == correct)
assert isinstance(res, np.ndarray)
res = np.asarray(sf['A'])
assert len(res.shape) == 1
def test_iloc():
# name index and columns somehow so that their names are not integers
sf = SparseFrame(np.identity(5), index=list('ABCDE'),
columns=list('ABCDE'))
assert np.all(sf.iloc[:2].data.todense() == np.identity(5)[:2])
assert np.all(sf.iloc[[3, 4]].data.todense() == np.identity(5)[[3, 4]])
assert np.all(sf.iloc[3].data.todense() == np.identity(5)[3])
assert sf.iloc[1:].shape == (4, 5)
def test_loc():
sf = SparseFrame(np.identity(5), index=list("ABCDE"))
# test single
assert np.all(sf.loc['A'].data.todense() == np.matrix([[1, 0, 0, 0, 0]]))
# test slices
assert np.all(sf.loc[:'B'].data.todense() == np.identity(5)[:2])
# test all
assert np.all(sf.loc[list("ABCDE")].data.todense() == np.identity(5))
assert np.all(sf.loc[:, :].data.todense() == np.identity(5))
assert np.all(sf.loc[:].data.todense() == np.identity(5))
sf = SparseFrame(np.identity(5), pd.date_range("2016-10-01", periods=5))
str_slice = slice('2016-10-01',"2016-10-03")
assert np.all(sf.loc[str_slice].data.todense() ==
np.identity(5)[:3])
ts_slice = slice(pd.Timestamp('2016-10-01'),pd.Timestamp("2016-10-03"))
assert np.all(sf.loc[ts_slice].data.todense() ==
np.identity(5)[:3])
dt_slice = slice(dt.date(2016,10,1), dt.date(2016,10,3))
assert np.all(sf.loc[dt_slice].data.todense() ==
np.identity(5)[:3])
def test_loc_multi_index(sf_midx, sf_midx_int):
assert sf_midx.loc['2016-10-01'].data[0, 0] == 1
str_slice = slice('2016-10-01', "2016-10-03")
assert np.all(sf_midx.loc[str_slice].data.todense() ==
np.identity(5)[:3])
ts_slice = slice(pd.Timestamp('2016-10-01'), pd.Timestamp("2016-10-03"))
assert np.all(sf_midx.loc[ts_slice].data.todense() ==
np.identity(5)[:3])
dt_slice = slice(dt.date(2016, 10, 1), dt.date(2016, 10, 3))
assert np.all(sf_midx.loc[dt_slice].data.todense() ==
np.identity(5)[:3])
assert np.all(sf_midx_int.loc[1].todense().values == sf_midx.data[:4,:])
assert np.all(sf_midx_int.loc[0].todense().values == sf_midx.data[4, :])
def test_set_index(sf_midx):
sf = sf_midx.set_index(level=1)
assert np.all(sf.index.values == np.arange(5))
sf = sf_midx.set_index(column='A')
assert np.all(sf.index.values[1:] == 0)
assert sf.index.values[0] == 1
sf = sf_midx.set_index(idx=np.arange(5))
assert np.all(sf.index.values == np.arange(5))
# what if indices are actually ints, but don't start from 0?
sf = SparseFrame(np.identity(5), index=[1, 2, 3, 4, 5])
# test single
assert np.all(sf.loc[1].data.todense() == np.matrix([[1, 0, 0, 0, 0]]))
# test slices
assert np.all(sf.loc[:2].data.todense() == np.identity(5)[:2])
# assert np.all(sf.loc[[4, 5]].data.todense() == np.identity(5)[[3, 4]])
def test_save_load_multiindex(sf_midx):
with tmpdir() as tmp:
# test new
path = os.path.join(tmp, 'sf.npz')
sf_midx.to_npz(path)
res = SparseFrame.read_npz(path)
assert isinstance(res.index, pd.MultiIndex)
# test backwards compatibility
def _to_npz_legacy(sf, filename):
data = _csr_to_dict(sf.data)
data['frame_index'] = sf.index.values
data['frame_columns'] = sf.columns.values
np.savez(filename, **data)
_to_npz_legacy(sf_midx, path)
res = SparseFrame.read_npz(path)
assert isinstance(res.index, pd.MultiIndex)
def test_new_column_assign_array():
sf = SparseFrame(np.identity(5))
sf[6] = np.ones(5)
correct = np.hstack([np.identity(5), np.ones(5).reshape(-1, 1)])
assert sf.shape == (5, 6)
assert np.all(correct == sf.data.todense())
def test_new_column_assign_number():
sf = SparseFrame(np.identity(5))
sf[6] = 1
correct = np.hstack([np.identity(5), np.ones(5).reshape(-1, 1)])
assert sf.shape == (5, 6)
assert np.all(correct == sf.data.todense())
def test_assign_array():
sf = SparseFrame(np.identity(5), columns=list('ABCDE'))
sf = sf.assign(**{'F': np.ones(5)})
correct = np.hstack([np.identity(5), np.ones(5).reshape(-1, 1)])
assert 'F' in set(sf.columns)
assert sf.shape == (5, 6)
assert np.all(correct == sf.data.todense())
def test_assign_number():
sf = SparseFrame(np.identity(5), columns=list('ABCDE'))
sf = sf.assign(**{'F': 1})
correct = np.hstack([np.identity(5), np.ones(5).reshape(-1, 1)])
assert 'F' in set(sf.columns)
assert sf.shape == (5, 6)
assert np.all(correct == sf.data.todense())
def test_existing_column_assign_array():
sf = SparseFrame(np.identity(5))
with pytest.raises(NotImplementedError):
sf[0] = np.ones(5)
correct = np.identity(5)
correct[:, 0] = 1
assert np.all(correct == sf.data.todense())
def test_existing_column_assign_number():
sf = SparseFrame(np.identity(5))
with pytest.raises(NotImplementedError):
sf[0] = 1
correct = np.identity(5)
correct[:, 0] = 1
assert np.all(correct == sf.data.todense())
def test_add_total_overlap(complex_example):
first, second, third = complex_example
correct = first.sort_index().data.todense()
correct[2:6, :] += second.sort_index().data.todense()
correct[6:, :] += third.sort_index().data.todense()
res = first.add(second).add(third).sort_index()
assert np.all(res.data.todense() == correct)
def test_simple_add_partial_overlap(complex_example):
first = SparseFrame(np.ones((3, 5)), index=[0, 1, 2])
second = SparseFrame(np.ones((3, 5)), index=[2, 3, 4])
correct = np.ones((5,5))
correct[2, :] += 1
res = first.add(second)
assert np.all(res.data.todense() == correct)
assert np.all(res.index == range(5))
def test_add_partial_overlap(complex_example):
first, second, third = complex_example
third = third.sort_index()
third._index = np.arange(8, 12)
correct = first.sort_index().data.todense()
correct[2:6, :] += second.sort_index().data.todense()
correct[8:, :] += third.sort_index().data.todense()[:2, :]
correct = np.vstack((correct, third.sort_index().data.todense()[2:, :]))
res = first.add(second).add(third).sort_index()
assert np.all(res.data.todense() == correct)
def test_add_no_overlap(complex_example):
first, second, third = complex_example
third = third.sort_index()
third._index = np.arange(10, 14)
correct = first.sort_index().data.todense()
correct[2:6, :] += second.sort_index().data.todense()
correct = np.vstack((correct, third.sort_index().data.todense()))
res = first.add(second).add(third).sort_index()
assert np.all(res.data.todense() == correct)
def test_csr_one_hot_series_disk_categories(sampledata):
with tmpdir() as tmp:
categories = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday']
cat_path = os.path.join(tmp, 'bla.pickle')
pd.Series(categories).to_pickle(cat_path)
sparse_frame = sparse_one_hot(sampledata(49),
categories={'weekday': cat_path})
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).data.todense()
assert np.all(res == np.identity(7) * 7)
def test_csr_one_hot_series_legacy(sampledata):
categories = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday']
sparse_frame = sparse_one_hot(sampledata(49), 'weekday', categories)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).data.todense()
assert np.all(res == np.identity(7) * 7)
def test_csr_one_hot_series(sampledata, weekdays, weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
sparse_frame = sparse_one_hot(sampledata(49), categories=categories,
order=['weekday', 'weekday_abbr'])
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).data.todense()
assert np.all(res == correct)
assert all(sparse_frame.columns == (weekdays + weekdays_abbr))
def test_csr_one_hot_series_categorical_same_order(sampledata, weekdays,
weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
data = sampledata(49, categorical=True)
categories = {'weekday': data['weekday'].cat.categories.tolist(),
'weekday_abbr': data['weekday_abbr'].cat.categories.tolist()}
sparse_frame = sparse_one_hot(data,
categories=categories,
order=['weekday', 'weekday_abbr'],
ignore_cat_order_mismatch=False)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)) \
.todense()[weekdays + weekdays_abbr].values
assert np.all(res == correct)
assert set(sparse_frame.columns) == set(weekdays + weekdays_abbr)
def test_csr_one_hot_series_categorical_different_order(sampledata, weekdays,
weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
data = sampledata(49, categorical=True)
categories = {
'weekday': data['weekday'].cat.categories.tolist()[::-1],
'weekday_abbr': data['weekday_abbr'].cat.categories.tolist()[::-1]
}
with pytest.raises(ValueError):
sparse_frame = sparse_one_hot(data,
categories=categories,
order=['weekday', 'weekday_abbr'],
ignore_cat_order_mismatch=False)
def test_csr_one_hot_series_categorical_different_order_ignore(
sampledata, weekdays, weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
data = sampledata(49, categorical=True)
categories = {
'weekday': data['weekday'].cat.categories.tolist()[::-1],
'weekday_abbr': data['weekday_abbr'].cat.categories.tolist()[::-1]
}
sparse_frame = sparse_one_hot(data,
categories=categories,
order=['weekday', 'weekday_abbr'],
ignore_cat_order_mismatch=True)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)) \
.todense()[weekdays + weekdays_abbr].values
assert np.all(res == correct)
assert set(sparse_frame.columns) == set(weekdays + weekdays_abbr)
def test_csr_one_hot_series_categorical_no_categories(
sampledata, weekdays, weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
data = sampledata(49, categorical=True)
categories = {
'weekday': None,
'weekday_abbr': None
}
sparse_frame = sparse_one_hot(data,
categories=categories,
order=['weekday', 'weekday_abbr'],
ignore_cat_order_mismatch=True)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)) \
.todense()[weekdays + weekdays_abbr].values
assert np.all(res == correct)
assert set(sparse_frame.columns) == set(weekdays + weekdays_abbr)
def test_csr_one_hot_series_other_order(sampledata, weekdays, weekdays_abbr):
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
sparse_frame = sparse_one_hot(sampledata(49), categories=categories,
order=['weekday_abbr', 'weekday'])
assert all(sparse_frame.columns == (weekdays_abbr + weekdays))
def test_csr_one_hot_series_no_categories(sampledata, weekdays, weekdays_abbr):
data = sampledata(49, categorical=True).drop('date', axis=1)
sparse_frame = sparse_one_hot(data)
assert set(sparse_frame.columns) \
== set(weekdays_abbr) | set(weekdays) | {'id'}
def test_csr_one_hot_series_wrong_order(sampledata, weekdays, weekdays_abbr):
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
with pytest.raises(AssertionError):
sparse_one_hot(sampledata(49), categories=categories,
order=['weekday_abbr', 'weekday', 'wat'])
with pytest.raises(AssertionError):
sparse_one_hot(sampledata(49), categories=categories,
order=['weekday_abbr'])
def test_csr_one_hot_series_no_order(sampledata, weekdays, weekdays_abbr):
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
sparse_frame = sparse_one_hot(sampledata(49), categories=categories)
assert sorted(sparse_frame.columns) == sorted(weekdays_abbr + weekdays)
def test_csr_one_hot_series_prefixes(sampledata, weekdays, weekdays_abbr):
correct = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
sparse_frame = sparse_one_hot(sampledata(49), categories=categories,
order=['weekday', 'weekday_abbr'],
prefixes=True)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).data.todense()
assert np.all(res == correct)
correct_columns = list(map(lambda x: 'weekday_' + x, weekdays)) \
+ list(map(lambda x: 'weekday_abbr_' + x, weekdays_abbr))
assert all(sparse_frame.columns == correct_columns)
def test_csr_one_hot_series_prefixes_sep(sampledata, weekdays, weekdays_abbr):
categories = {'weekday': weekdays,
'weekday_abbr': weekdays_abbr}
sparse_frame = sparse_one_hot(sampledata(49), categories=categories,
order=['weekday', 'weekday_abbr'],
prefixes=True, sep='=')
correct_columns = list(map(lambda x: 'weekday=' + x, weekdays)) \
+ list(map(lambda x: 'weekday_abbr=' + x, weekdays_abbr))
assert all(sparse_frame.columns == correct_columns)
def test_csr_one_hot_series_same_categories(weekdays):
sample_data = pd.DataFrame(
dict(date=pd.date_range("2017-01-01", periods=7)))
sample_data["weekday"] = sample_data.date.dt.weekday_name
sample_data["weekday2"] = sample_data.date.dt.weekday_name
categories = {'weekday': weekdays,
'weekday2': weekdays}
with pytest.raises(ValueError):
sparse_one_hot(sample_data, categories=categories,
order=['weekday', 'weekday2'])
sparse_frame = sparse_one_hot(sample_data, categories=categories,
order=['weekday', 'weekday2'],
prefixes=True)
correct_columns = list(map(lambda x: 'weekday_' + x, weekdays)) \
+ list(map(lambda x: 'weekday2_' + x, weekdays))
assert all(sparse_frame.columns == correct_columns)
def test_csr_one_hot_series_too_much_categories(sampledata):
categories = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Yesterday', 'Saturday', 'Birthday']
sparse_frame = sparse_one_hot(sampledata(49),
categories={'weekday': categories})
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).data.todense()
correct = np.identity(7) * 7
correct = np.hstack((correct[:,:6], np.zeros((7, 1)),
correct[:, 6:], np.zeros((7, 1))))
assert np.all(res == correct)
def test_csr_one_hot_series_too_little_categories(sampledata):
categories = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday']
with pytest.raises(ValueError):
sparse_one_hot(sampledata(49), categories={'weekday': categories})
def test_csr_one_hot_series_dense_column(sampledata, weekdays, weekdays_abbr):
correct_without_dense = np.hstack((np.identity(7) * 7,
np.identity(7) * 7))
data = sampledata(49, categorical=True)
data['dense'] = np.random.rand(len(data))
categories = {
'weekday': None,
'weekday_abbr': None,
'dense': False,
}
sparse_frame = sparse_one_hot(data, categories=categories)
res = sparse_frame.groupby_sum(np.tile(np.arange(7), 7)).todense()
assert set(sparse_frame.columns) \
== set(weekdays + weekdays_abbr + ['dense'])
assert np.all(res[weekdays + weekdays_abbr] == correct_without_dense)
assert (sparse_frame['dense'].todense() == data['dense']).all()
def test_csr_one_hot_series_dense_column_non_numeric(sampledata, weekdays,
weekdays_abbr):
data = sampledata(49, categorical=True)
data['dense'] = np.random.choice(list('abc'), len(data))
categories = {
'weekday': None,
'weekday_abbr': None,
'dense': False,
}
with pytest.raises(TypeError,
match='Column `dense` is not of numerical dtype'):
sparse_one_hot(data, categories=categories)
def test_npz_io(complex_example):
sf, second, third = complex_example
sf.to_npz('/tmp/sparse.npz')
loaded = SparseFrame.read_npz('/tmp/sparse.npz')
assert np.all(loaded.data.todense() == sf.data.todense())
assert np.all(loaded.index == sf.index)
assert np.all(loaded.columns == sf.columns)
os.remove('/tmp/sparse.npz')
def test_npz_io_s3(complex_example):
with mock_s3_fs('sparsity'):
sf, second, third = complex_example
sf.to_npz('s3://sparsity/sparse.npz')
loaded = SparseFrame.read_npz('s3://sparsity/sparse.npz')
assert np.all(loaded.data.todense() == sf.data.todense())
assert np.all(loaded.index == sf.index)
assert np.all(loaded.columns == sf.columns)
# noinspection PyStatementEffect
def test_getitem():
id_ = np.identity(10)
sf = SparseFrame(id_, columns=list('abcdefghij'))
assert sf['a'].data.todense()[0] == 1
assert sf['j'].data.todense()[9] == 1
assert np.all(sf[['a', 'b']].data.todense() == np.asmatrix(id_[:, [0, 1]]))
tmp = sf[['j', 'a']].data.todense()
assert tmp[9, 0] == 1
assert tmp[0, 1] == 1
assert (sf[list('abcdefghij')].data.todense() == np.identity(10)).all()
assert sf[[]].shape == (10, 0)
assert len(sf[[]].columns) == 0
assert isinstance(sf.columns, type(sf[[]].columns))
with pytest.raises(ValueError):
sf[None]
idx = pd.Index(list('abc'))
pdt.assert_index_equal(idx, sf[idx].columns)
import cv2.aruco as aruco
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2, shutil, math, re, random, functools, time, os, errno
from sklearn.metrics import mean_absolute_error
from glob import glob
from shapely.geometry import Polygon
def extract_directory(files, data_folder, out_folder):
"""
Given a list of all the images, extracts all sub-directories in order
:param files: list of strs, containing all the image paths
:param data_folder: str, folder name containing the dataset
:param out_folder: str, folder name that will contain the newly created montage training set
:return: all_dirs: set of str, containing unique sequence of sub-directories that need to be created.
"""
all_dirs = set()
for f in files:
f_list = f.split('/')
idx = f_list.index(data_folder)
f_list = f_list[idx - len(f_list) + 1:-1]
all_dirs.add(os.path.join(out_folder, 'cropped', '/'.join(f_list)))
all_dirs.add(os.path.join(out_folder, 'detected_img', '/'.join(f_list)))
return all_dirs
def make_dir(pths):
"""
Creates all the directories listed in pths.
Warning: if directories and subdirectories are both given, directories must be made first or else,
some subdirectories will be deleted when the directory is made. This is because when each folder is made,
the algorithm checks if it exists, and if it does,
it proceeds to delete all its contents before remaking the folder.
:param pths: List of str, or str.
:return: None
"""
if isinstance(pths, list) or isinstance(pths, set):
for pth in pths:
if os.path.exists(pth):
shutil.rmtree(pth)
os.makedirs(pth)
else:
if os.path.exists(pths):
shutil.rmtree(pths)
os.makedirs(pths)
def detect_marker(image, img, aruco_dict, parameters):
"""
Using aruco algorithm, automatically find the marker.
Assumption: There is always a marker of interest in the image
:param image: numpy array of the image
:param img: image path
:param aruco_dict: aruco dictionary
:param parameters: aruco dictionary parameters
:return: corners of the aruco marker
"""
dummy_corners = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Initial try to find marker
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
if len(corners) != 0:
found_marker = True
else:
found_marker = False
# Try rotating image to find marker
if not found_marker:
for i in range(0, 360, 90):
# print(i)
gray_rotated, dummy_corners = rotate_image(gray, dummy_corners, i)
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray_rotated, aruco_dict, parameters=parameters)
if len(corners) != 0:
print("Found markers!")
gray, corners = rotate_image(gray_rotated, corners, -i)
found_marker = True
break
# Check that there is only one marker detected
if len(corners) == 0:
print("Found no markers in file: {}".format(img))
# Manually find marker
corners = manually_select_marker(image)
# corners = False
elif len(corners) > 1:
print("Found more than 1 markers in file: {}".format(img))
# Manually find marker
corners = manually_select_marker(image)
# corners = False
else:
corners = corners[0][0]
return corners
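# Illustrative setup for calling detect_marker (sketch; the dictionary constant
# is an assumption and must match the markers actually printed):
# aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
# parameters = aruco.DetectorParameters_create()
# image = cv2.imread('example.jpg')
# corners = detect_marker(image, 'example.jpg', aruco_dict, parameters)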
def manually_select_marker(image):
"""
https://www.pyimagesearch.com/2015/03/09/capturing-mouse-click-events-with-python-and-opencv/
Get user to automatically select 4 points
:param image: numpy image
:return: 4 points of the marker
"""
refPt = []
def click_and_crop(event, x, y, flags, param):
'''
Mouse event connected to window
:param event: OpenCV mouse event type (e.g. cv2.EVENT_LBUTTONDOWN)
:param x: x coordinate of the mouse event
:param y: y coordinate of the mouse event
:param flags: event flags passed by OpenCV (unused)
:param param: optional user data passed by OpenCV (unused)
:return: None
'''
# if the left mouse button was clicked, record the starting (x, y) coordinates
if event == cv2.EVENT_LBUTTONDOWN:
refPt.append([x, y])
# draw a rectangle around the region of interest
cv2.circle(clone, tuple(refPt[-1]), 25, (0, 255, 0), -1)
cv2.imshow("image", clone)
# load the image, clone it, and setup the mouse callback function
clone = image.copy()
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("image", click_and_crop)
cv2.resizeWindow('image', 2400, 1200)
# keep looping until the 'q' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", clone)
key = cv2.waitKey(1) & 0xFF
# if the 's' key is pressed, show current points
if key == ord("s"):
print(refPt)
# if the 'r' key is pressed, reset the cropping region
if key == ord("r"):
clone = image.copy()
refPt = []
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
# if there are four reference points, then crop the region of interest from the image and display it
if len(refPt) == 4:
break
print("You do not have exactly 4 points.")
print(refPt)
# close all open windows
cv2.destroyAllWindows()
return np.array(refPt)
def rotate_image(image, restricted_area, angle):
"""
https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
    Rotates an OpenCV 2 / NumPy image about its centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
new image, with a black background
"""
# Get the image size
    # No, that's not an error - NumPy stores image matrices backwards
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) / 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
)
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([
[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)],
[0, 0, 1]
])
    # Compute the transform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Obtain the rotated coordinates of the restricted corners
restricted_area = np.array([
np.dot(affine_mat, np.append(restricted_area[0, :], 1)).A[0],
np.dot(affine_mat, np.append(restricted_area[1, :], 1)).A[0],
np.dot(affine_mat, np.append(restricted_area[2, :], 1)).A[0],
np.dot(affine_mat, np.append(restricted_area[3, :], 1)).A[0]
])
# Apply the transform
result = cv2.warpAffine(
image,
affine_mat,
(new_w, new_h),
flags=cv2.INTER_LINEAR
)
return result, restricted_area
def largest_rotated_rect(w, h, angle):
"""
https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and <NAME> from Stack Overflow
Converted to Python by <NAME>
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (bb_w - 2 * x, bb_h - 2 * y)
def crop_around_center(image, rotated_restricted_area, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
    around its centre point
"""
cropped_and_rotated_restricted_area = rotated_restricted_area.copy()
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if (width > image_size[0]):
width = image_size[0]
if (height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
# Shift the marker corners by the cropping amount
cropped_and_rotated_restricted_area[:, 0] = cropped_and_rotated_restricted_area[:, 0] - y1
cropped_and_rotated_restricted_area[:, 1] = cropped_and_rotated_restricted_area[:, 1] - x1
return image[y1:y2, x1:x2], cropped_and_rotated_restricted_area
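# Hedged usage sketch (not part of the original pipeline): the three helpers above
# are meant to be chained -- rotate the image (and marker corners), compute the
# largest border-free rectangle, then crop around the centre. The angle below is
# an arbitrary example value.
def _example_rotate_and_crop(image, marker_corners, angle_deg=30):
    rotated, rotated_corners = rotate_image(image, marker_corners, angle_deg)
    h, w = image.shape[0], image.shape[1]
    crop_w, crop_h = largest_rotated_rect(w, h, math.radians(angle_deg))
    return crop_around_center(rotated, rotated_corners, int(crop_w), int(crop_h))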
def get_crop(img, cnt, width=299, height=299):
# print("shape of cnt: {}".format(cnt.shape))
rect = cv2.minAreaRect(cnt)
# print("rect: {}".format(rect))
# the order of the box points: bottom left, top left, top right,
# bottom right
box = cv2.boxPoints(rect)
box = np.int0(box)
# print("bounding box: {}".format(box))
# cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
# get width and height of the detected rectangle
if abs(int(rect[1][0])-width) > 10 or abs(int(rect[1][1])-height) > 10:
raise ValueError("Your crop image size and desired image size are very different.")
src_pts = box.astype("float32")
    # coordinates of the box points after the rectangle has been
# straightened
dst_pts = np.array([[0, height-1],
[0, 0],
[width-1, 0],
[width-1, height-1]], dtype="float32")
# the perspective transformation matrix
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
# directly warp the rotated rectangle to get the straightened rectangle
return cv2.warpPerspective(img, M, (width, height))
def RotM(alpha):
""" Rotation Matrix for angle ``alpha`` """
sa, ca = np.sin(alpha), np.cos(alpha)
return np.array([[ca, -sa],
[sa, ca]])
def getRandomSquareVertices(center, point_0, phi):
'''
center: tuple
point_0: tuple from origin
phi: angle
'''
vv = [[np.asarray(center) + functools.reduce(np.dot, [RotM(phi), RotM(np.pi / 2 * c), point_0])] for c in range(4)]
return np.array(vv).astype(np.float32)
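# Hedged usage sketch (not part of the original pipeline): builds the four vertices
# of a 299 x 299 square centred at (500, 400) and rotated by 30 degrees, then warps
# that square out of the image with get_crop. The centre point and angle are
# arbitrary; the image must be large enough to contain the square.
def _example_square_crop(image, width=299, height=299):
    vertices = getRandomSquareVertices((500, 400), (width / 2, height / 2), np.pi / 6)
    return get_crop(image, vertices, width=width, height=height)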
def get_random_crops(image, crop_height, crop_width, restricted_area, n_crops=4, max_angle=360, seed=None, width=299, height=299, n_channels=1, m_patches=10, margin=75):
"""
Randomly rotate and retrieve crops from image to generate montages
:param image: numpy array, contains the pixel value of images
:param crop_height: int, crop height
:param crop_width: int, crop width
:param restricted_area: numpy array size 4-by-2, containing coordinates of the marker
:param n_crops: int, Number of crops in the montage
:param m_patches: int, Number of montages to generate
:param max_angle: int, Angle by which the image is rotated
:param seed: random number generator seed
:return:
"""
# Initialize parameters
np.random.seed(seed=seed)
image_height, image_width = image.shape[0:2]
crops = []
for i in range(m_patches):
crops.append(get_crops(restricted_area, n_crops, image, crop_width, crop_height, n_channels, margin))
# crops.extend(get_crops(restricted_area, n_crops, image, crop_width, crop_height, n_channels, margin))
return crops
def montage_crops(n_crops, crop_width, crop_height, n_channels, crops):
rows = int(n_crops ** 0.5)
tmp_image = np.zeros([crop_width * rows, crop_height * rows, n_channels], dtype='uint8')
for i in range(rows):
for j in range(rows):
if n_channels == 1:
tmp_image[i * crop_height:(i + 1) * crop_height, j * crop_width:(j + 1) * crop_width, 0] = crops[
i * rows + j]
else:
tmp_image[i * crop_height:(i + 1) * crop_height, j * crop_width:(j + 1) * crop_width, :] = crops[
i * rows + j]
return tmp_image
def get_crops(restricted_area, n_crops, image, crop_width, crop_height, n_channels, margin):
crops = []
# Create polygon to check if randomly generated points are inside polygon
marker_polygon = Polygon(restricted_area)
# Added margin to avoid whitespace on marker
marker_polygon = marker_polygon.buffer(margin)
for n in range(n_crops):
# Generate crops
found_crop = False
# attempt = 0
while not found_crop:
forbid_border = math.ceil((crop_width**2+crop_height**2)**(1/2))/2
max_x = image.shape[1] - forbid_border
max_y = image.shape[0] - forbid_border
x = np.random.randint(forbid_border, max_x)
y = np.random.randint(forbid_border, max_y)
rotation_angle = random.random()*np.pi
crop_vertices = getRandomSquareVertices((x,y), (crop_width/2, crop_height/2), rotation_angle)
crop_polygon = Polygon(
[(crop_vertices[0][0][0], crop_vertices[0][0][1]),
(crop_vertices[1][0][0], crop_vertices[1][0][1]),
(crop_vertices[2][0][0], crop_vertices[2][0][1]),
(crop_vertices[3][0][0], crop_vertices[3][0][1])])
found_crop = not marker_polygon.intersects(crop_polygon)
if found_crop:
if n_channels == 1:
                    crops.append(get_crop(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), crop_vertices, crop_width, crop_height))
# for cv, wid in zip(crop_vertices,[1000, 800, 500]):
# crops.append(get_crop(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), cv, wid, wid))
else:
crops.append(get_crop(image, crop_vertices, crop_width, crop_height))
break
# attempt += 1
    return montage_crops(n_crops, crop_width, crop_height, n_channels, crops)
def get_pix_per_len(corners, marker_len):
dist = []
for c in corners:
tmp_dist = []
for c_2 in corners:
tmp_dist.append(((c_2[0] - c[0]) ** 2 + (c_2[1] - c[1]) ** 2) ** 0.5)
tmp_dist = sorted(tmp_dist)[1:-1]
dist.extend(tmp_dist)
return np.average(dist) / marker_len
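# Hedged worked example (not part of the original pipeline): for an axis-aligned
# square marker with a 100 px side, each corner keeps only its two side-length
# distances (the zero self-distance and the diagonal are trimmed off), so the
# average pixel distance is 100. With a physical side length of 5 units this
# gives 100 / 5 = 20 pixels per unit length.
def _example_pix_per_len():
    corners = np.array([[0, 0], [100, 0], [100, 100], [0, 100]])
    return get_pix_per_len(corners, marker_len=5)  # -> 20.0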
def extract_crops_from_df_or_img(files, img_df, data_folder, out_folder, marker_len, units, crop_height, crop_width, n_crops, m_patches, equalize_distribution):
# Initialize container for holding patch-wise information
df_crop = {"original_fp": [], 'file': [], "pix_per_len": [], 'units': []}
# If path to a dataframe containing marker information is provided, read the csv.
if img_df is not None:
        df_img = pd.read_csv(img_df)
from __future__ import annotations
import numpy as np
import pandas as pd
import pytest
from tremana.analysis.transformations import fft_spectra
from tremana.analysis.transformations import power_density_spectra
@pytest.mark.parametrize("time", (30, 60, 120))
@pytest.mark.parametrize(
"frequencies, amplitudes",
(
((1,), (1,)),
((1, 2), (2, 1)),
((1, 5, 10), (1, 5, 10)),
),
)
@pytest.mark.parametrize("sampling_rate", (1000, 2000))
def test_fft_spectra(
time: int,
frequencies: tuple[int | float, ...],
amplitudes: tuple[int | float, ...],
sampling_rate: int,
):
"""FFT peaks are at frequency and have amplitude height"""
t = np.linspace(0, time, num=time * sampling_rate) * 2 * np.pi
signal_values = np.zeros(t.shape)
for frequency, amplitude in zip(frequencies, amplitudes):
signal_values += amplitude * np.sin(frequency * t)
signal = pd.DataFrame({"X": signal_values}, index=t)
result = fft_spectra(signal, columns=["X"], sampling_rate=sampling_rate)
for frequency, amplitude in zip(frequencies, amplitudes):
fft_amplitude_at_frequency = result["X"].iat[
result.index.get_loc(frequency, method="nearest")
]
assert np.allclose(fft_amplitude_at_frequency, amplitude, rtol=1e-3)
def test_fft_spectra_normalized():
"""FFT amplitude is set to 1 and columns are used if not provided"""
time = 30
sampling_rate = 100
t = np.linspace(0, time, num=time * sampling_rate) * 2 * np.pi
frequency = 3
signal_values = 100 * np.sin(frequency * t)
signal = pd.DataFrame({"X": signal_values}, index=t)
result = fft_spectra(signal, sampling_rate=sampling_rate, norm=True)
fft_amplitude_at_frequency = result["X"].iat[result.index.get_loc(frequency, method="nearest")]
assert np.allclose(fft_amplitude_at_frequency, 1)
@pytest.mark.parametrize("time", (30, 60, 120))
@pytest.mark.parametrize(
"frequencies, amplitudes",
(
((1,), (1,)),
((1, 2), (2, 1)),
((1, 5, 10), (1, 5, 10)),
),
)
@pytest.mark.parametrize("sampling_rate", (1000, 2000))
def test_power_density_spectra(
time: int,
frequencies: tuple[int | float, ...],
amplitudes: tuple[int | float, ...],
sampling_rate: int,
):
"""PDS peaks are at frequency and have amplitude height"""
t = np.linspace(0, time, num=time * sampling_rate) * 2 * np.pi
signal_values = np.zeros(t.shape)
for frequency, amplitude in zip(frequencies, amplitudes):
signal_values += amplitude * np.sin(frequency * t)
signal = pd.DataFrame({"X": signal_values}, index=t)
result = power_density_spectra(signal, columns=["X"], sampling_rate=sampling_rate)
for frequency, amplitude in zip(frequencies, amplitudes):
pds_amplitude_at_frequency = result["X"].iat[
result.index.get_loc(frequency, method="nearest")
]
assert np.allclose(pds_amplitude_at_frequency, amplitude ** 2, rtol=1e-3)
def test_power_density_spectra_normalized():
"""PDS amplitude is set to 1 and columns are used if not provided"""
time = 30
sampling_rate = 100
t = np.linspace(0, time, num=time * sampling_rate) * 2 * np.pi
frequency = 3
signal_values = 100 * np.sin(frequency * t)
    signal = pd.DataFrame({"X": signal_values}, index=t)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 26 00:52:14 2018
@author: Kazuki
"""
import os
import pandas as pd
import gc
from multiprocessing import Pool
from glob import glob
import utils
utils.start(__file__)
#==============================================================================
KEY = 'SK_ID_CURR'
PREF = 'ins'
NTHREAD = 3
col_num = ['NUM_INSTALMENT_VERSION', 'NUM_INSTALMENT_NUMBER', 'DAYS_INSTALMENT',
'DAYS_ENTRY_PAYMENT', 'AMT_INSTALMENT', 'AMT_PAYMENT']
col_group = ['SK_ID_PREV', 'NUM_INSTALMENT_VERSION', 'NUM_INSTALMENT_NUMBER']
# =============================================================================
# feature
# =============================================================================
ins = utils.read_pickles('../data/installments_payments')
base = ins[[KEY]].drop_duplicates().set_index(KEY)
def nunique(x):
return len(set(x))
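# Hedged sketch (not part of the original script) of the nested aggregation pattern
# used in multi_gr2 below: a statistic is first computed per (SK_ID_CURR, group key)
# pair and then re-aggregated per SK_ID_CURR. The column choices are illustrative.
def _example_nested_aggregation(installments):
    per_prev_mean = installments.groupby([KEY, 'SK_ID_PREV'])['AMT_PAYMENT'].mean()
    return per_prev_mean.groupby(KEY).agg(['min', 'max', 'mean', 'std', 'sum'])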
def multi_gr2(k):
gr2 = ins.groupby([KEY, k])
gc.collect()
print(k)
keyname = 'gby-'+'-'.join([KEY, k])
# size
gr1 = gr2.size().groupby(KEY)
name = f'{PREF}_{keyname}_size'
base[f'{name}_min'] = gr1.min()
base[f'{name}_max'] = gr1.max()
base[f'{name}_max-min'] = base[f'{name}_max'] - base[f'{name}_min']
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_sum'] = gr1.sum()
base[f'{name}_nunique'] = gr1.size()
for v in col_num:
# min
gr1 = gr2[v].min().groupby(KEY)
name = f'{PREF}_{keyname}_{v}_min'
base[f'{name}_max'] = gr1.max()
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_sum'] = gr1.sum()
base[f'{name}_nunique'] = gr1.apply(nunique)
# max
gr1 = gr2[v].max().groupby(KEY)
name = f'{PREF}_{keyname}_{v}_max'
base[f'{name}_min'] = gr1.min()
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_sum'] = gr1.sum()
base[f'{name}_nunique'] = gr1.apply(nunique)
# mean
gr1 = gr2[v].mean().groupby(KEY)
name = f'{PREF}_{keyname}_{v}_mean'
base[f'{name}_min'] = gr1.min()
base[f'{name}_max'] = gr1.max()
base[f'{name}_max-min'] = base[f'{name}_max'] - base[f'{name}_min']
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_sum'] = gr1.sum()
base[f'{name}_nunique'] = gr1.apply(nunique)
# std
gr1 = gr2[v].std().groupby(KEY)
name = f'{PREF}_{keyname}_{v}_std'
base[f'{name}_min'] = gr1.min()
base[f'{name}_max'] = gr1.max()
base[f'{name}_max-min'] = base[f'{name}_max'] - base[f'{name}_min']
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_sum'] = gr1.sum()
base[f'{name}_nunique'] = gr1.apply(nunique)
# sum
gr1 = gr2[v].sum().groupby(KEY)
name = f'{PREF}_{keyname}_{v}_sum'
base[f'{name}_min'] = gr1.min()
base[f'{name}_max'] = gr1.max()
base[f'{name}_max-min'] = base[f'{name}_max'] - base[f'{name}_min']
base[f'{name}_mean'] = gr1.mean()
base[f'{name}_std'] = gr1.std()
base[f'{name}_nunique'] = gr1.apply(nunique)
base.to_pickle(f'../data/tmp_302_{PREF}_{k}.p')
# =============================================================================
# gr2
# =============================================================================
pool = Pool(NTHREAD)
callback = pool.map(multi_gr2, col_group)
pool.close()
# =============================================================================
# merge
# =============================================================================
df = pd.concat([ pd.read_pickle(f) for f in sorted(glob(f'../data/tmp_302_{PREF}*.p'))], axis=1)
base = pd.concat([base, df], axis=1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Credits: <NAME>, <NAME>
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"
# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import fire
import yaml
import tensorflow as tf
import numpy as np
from Bio import SeqIO
import pandas as pd
import ray
from utils import preprocess as pp
from pathlib import Path
from models import model_5, model_7, model_10
from joblib import load
import psutil
def predict_nn(ds_path, nn_weights_path, length, n_cpus=3, batch_size=256):
"""
Breaks down contigs into fragments
and uses pretrained neural networks to give predictions for fragments
"""
pid = psutil.Process(os.getpid())
pid.cpu_affinity(range(n_cpus))
print("loading sequences for prediction")
try:
seqs_ = list(SeqIO.parse(ds_path, "fasta"))
except FileNotFoundError:
raise Exception("test dataset was not found. Change ds variable")
print("generating viral fragments and labels")
out_table = {
"id": [],
"length": [],
"fragment": [],
"pred_plant_5": [],
"pred_vir_5": [],
"pred_bact_5": [],
"pred_plant_7": [],
"pred_vir_7": [],
"pred_bact_7": [],
"pred_plant_10": [],
"pred_vir_10": [],
"pred_bact_10": [],
}
if not seqs_:
raise ValueError("All sequences were smaller than length of the model")
test_fragments = []
test_fragments_rc = []
ray.init(num_cpus=n_cpus, num_gpus=0, include_dashboard=False)
for seq in seqs_:
fragments_, fragments_rc, _ = pp.fragmenting([seq], length, max_gap=0.8,
sl_wind_step=int(length / 2))
test_fragments.extend(fragments_)
test_fragments_rc.extend(fragments_rc)
for j in range(len(fragments_)):
out_table["id"].append(seq.id)
out_table["length"].append(len(seq.seq))
out_table["fragment"].append(j)
it = pp.chunks(test_fragments, int(len(test_fragments) / n_cpus + 1))
test_encoded = np.concatenate(ray.get([pp.one_hot_encode.remote(s) for s in it]))
it = pp.chunks(test_fragments_rc, int(len(test_fragments_rc) / n_cpus + 1))
test_encoded_rc = np.concatenate(ray.get([pp.one_hot_encode.remote(s) for s in it]))
print('Encoding sequences finished')
print(
f"{np.shape(test_encoded)[0]} + {np.shape(test_encoded_rc)[0]} fragments generated")
ray.shutdown()
print('Starting sequence prediction')
for model, s in zip([model_5.model(length), model_7.model(length), model_10.model(length)], [5, 7, 10]):
model.load_weights(Path(nn_weights_path, f"model_{s}.h5"))
prediction = model.predict([test_encoded, test_encoded_rc], batch_size)
out_table[f"pred_plant_{s}"].extend(list(prediction[..., 0]))
out_table[f"pred_vir_{s}"].extend(list(prediction[..., 1]))
out_table[f"pred_bact_{s}"].extend(list(prediction[..., 2]))
print('Exporting predictions to csv file')
    return pd.DataFrame(out_table)
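# Hedged usage sketch (not part of the original script): the paths below are
# placeholders, and `length` must match the fragment length the networks were
# trained with.
def _example_predict_nn():
    return predict_nn(ds_path='contigs.fasta', nn_weights_path='weights',
                      length=1000, n_cpus=3, batch_size=256)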
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 07:24:08 2021
@author: stefanosbaros
"""
"""
Created on Tue Jan 26 07:54:08 2021
@author: <NAME> and <NAME>
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import recall_score, precision_score, r2_score, mean_squared_error, accuracy_score
from sklearn import metrics
from sklearn import svm
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
import pickle
from sklearn.metrics import plot_confusion_matrix
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from numpy import loadtxt
import xgboost as xgb
from xgboost import XGBClassifier
# Our goal in this project is to use logistic regression to predict
# whether someone with a certain profile of symptoms got infected with Covid-19 or not
# loading data
main_file="/Users/stefanosbaros/Desktop/Covid_ML_project/"
Covid_path = '/Users/stefanosbaros/Desktop/Covid_ML_project/corona_tested_individuals_ver_006.csv'
Covid_data = pd.read_csv(Covid_path)
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy as np
import pandas as pd
import astropy.units as u
import units_utils as units
import uncertainty_utils as uncertain
import corner
def draw_cornerplot(pred, labels=None, fig=None, color='black'):
n_data, n_params = pred.shape
plot = corner.corner(pred,
color=color,
smooth=1.0,
labels=labels,
#show_titles=True,
fill_contours=True,
bins=30,
fig=fig,
                         levels=(0.68, 0.95, 0.997),  # 1/2/3-sigma contour levels
hist_kwargs=dict(density=True, ))
return plot
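# Hedged usage sketch (not part of the original module): overlays two corner plots
# of random Gaussian draws, mirroring how emulated and observed quantities are
# compared further down. Sample sizes, offsets and labels are illustrative only.
def _example_draw_cornerplot():
    pred = np.random.normal(size=(1000, 3))
    obs = np.random.normal(loc=0.2, size=(1000, 3))
    fig = draw_cornerplot(pred, labels=['a', 'b', 'c'], color='tab:blue')
    return draw_cornerplot(obs, labels=['a', 'b', 'c'], fig=fig, color='tab:orange')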
def plot_sample_corner(fig, X, Y, sample, data_meta):
    canvas = FigureCanvas(fig)
    # Keep only rows where the observed values are fully defined
    safety_mask = ~Y.isnull().any(axis=1)
    plot_cols = ['psFlux_%s_mag' %bp for bp in 'giy']
    plot_cols += ['cModelFlux_%s_mag' %bp for bp in 'giy']
    plot_cols += ['Ixx', 'Ixy', 'Iyy']
    X = X.loc[safety_mask, :][plot_cols]
    Y = Y.loc[safety_mask, :][plot_cols]
    reg_dim = Y.shape[1] - 1
    sample = sample[safety_mask.values, :].reshape(-1, reg_dim)
    sample = pd.DataFrame(sample, index=None, columns=data_meta['Y_cols'][:-1])
    # Overlay the emulated sample (blue) on the observed values (orange)
    fig = draw_cornerplot(sample.values, fig=fig, color='tab:blue')
    fig = draw_cornerplot(Y.values, fig=fig, color='tab:orange')
    return fig
def plot_confusion_matrix(fig, X, Y, emulated):
canvas = FigureCanvas(fig)
ax = fig.gca()
truth_colname = 'star'
observed_colname = 'extendedness'
obs = (Y[observed_colname].values).astype(int)
em = (emulated[observed_colname].values > 0.5).astype(int)
count, _, _, _ = ax.hist2d(obs, em, norm=matplotlib.colors.LogNorm(), bins=2, cmap=plt.cm.get_cmap("gray")) #marker='.', alpha=0.2, label='_nolegend_')
ax.set_xlabel('observed')
ax.set_ylabel('emulated')
ax.set_title('stars vs. galaxies')
ax.set_xticks([0.25, 0.75])
ax.set_xticklabels(['not star (galaxy)', 'star'])
ax.set_yticks([0.25, 0.75])
ax.set_yticklabels(['not star (galaxy)', 'star'])
neg_x = 0.15
pos_x = 0.65
total = np.sum(count)
ax.text(pos_x, 0.75, "{} ({:.1f}%)".format(int(count[1, 1]), count[1, 1]/total*100.0), fontsize=16, color='white') # true positives
ax.text(neg_x, 0.75, "{} ({:.1f}%)".format(int(count[0, 1]), count[0, 1]/total*100.0), fontsize=16, color='white') # false positives
ax.text(pos_x, 0.25, "{} ({:.1f}%)".format(int(count[1, 0]), count[1, 0]/total*100.0), fontsize=16, color='white') # false negatives
ax.text(neg_x, 0.25, "{} ({:.1f}%)".format(int(count[0, 0]), count[0, 0]/total*100.0), fontsize=16) # true negatives
#cbar = plt.colorbar()
#plt.clim(1000, 60000)
canvas.draw()
return canvas
def plot_moment(fig, X, Y, emulated, emulated_second, moment_type, display_uncertainty='aleatoric', run='1.2i', plot_offset=True):
canvas = FigureCanvas(fig)
ax = fig.gca()
safety_mask = np.ones((X.shape[0])).astype(bool) # do not mask
em = emulated.loc[:, moment_type].values
em_second = emulated_second.loc[:, moment_type].values
obs = Y.loc[safety_mask, moment_type].values
n_observed = X.loc[safety_mask, 'n_obs'].values
al_sig = emulated.loc[safety_mask, '%s_al_sig' %moment_type].values
ep_sig = emulated.loc[safety_mask, '%s_ep_sig' %moment_type].values
obs_nomask = Y.loc[:, moment_type].values # for consistent plot scaling
if display_uncertainty == 'aleatoric':
display_sig = al_sig
elif display_uncertainty == 'epistemic':
display_sig = ep_sig
offset = (em - obs) # as - as
offset_second = (em_second - obs)
truth_mag = X.loc[safety_mask, 'truth_total_mag_r'].values
if moment_type in ['Ix', 'Iy']:
obs_err = uncertain.get_astrometric_error(truth_mag, 'r', n_observed) # as
unit = 'as'
elif moment_type in ['ra_offset', 'dec_offset']:
obs_err = uncertain.get_astrometric_error(truth_mag, 'r', n_observed)*1000.0 # mas
unit = 'mas'
elif moment_type in ['Ixx', 'Ixy', 'Iyy', 'IxxPSF', 'IxyPSF', 'IyyPSF']:
#obs_err = 2.0 * np.abs(obs) * uncertain.get_astrometric_error(truth_mag, 'r', n_observed) # as^2
obs_err = np.zeros_like(obs)
al_sig *= 2.0 * np.abs(obs)
ep_sig *= 2.0 * np.abs(obs)
unit = 'as^2'
# Sorting necessary for fill_between plots
sorted_id = np.argsort(obs)
sorted_obs = obs[sorted_id]
sorted_err = obs_err[sorted_id]
# Perfect mapping
perfect = np.linspace(np.min(obs_nomask), np.max(obs_nomask), 20)
# Baseline errors
ax.fill_between(sorted_obs, -sorted_err, sorted_err, alpha=0.5, facecolor='tab:orange', label=r'1-$\sigma$ photometric')
# Plot estimated uncertainty
ax.errorbar(obs, offset, color='tab:blue', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_1$ 1-$\sigma$ %s' %display_uncertainty)
    ax.errorbar(obs, offset_second, color='tab:olive', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_2$ 1-$\sigma$ %s' %display_uncertainty)
# Plot perfect mapping
ax.plot(perfect, np.zeros_like(perfect), linestyle='--', color='r', label="Perfect mapping")
#ax.set_ylim([-5, 5])
ax.set_title(moment_type)
ax.set_ylabel('Emulated - Observed (%s)' %unit)
ax.set_xlabel('Observed (%s)' %unit)
#ax.set_xscale('symlog')
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ epistemic: %.2f (%s)" %(np.mean(ep_sig), unit))
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ aleatoric: %.2f (%s)" %(np.mean(al_sig), unit))
ax.legend(loc=(1.05, 0.5))
canvas.draw()
return canvas
def plot_flux(fig, X, Y, emulated, emulated_second, flux_formatting, bandpass, display_uncertainty='aleatoric', run='1.2i', plot_offset=True):
canvas = FigureCanvas(fig)
ax = fig.gca()
flux_name = flux_formatting %bandpass
safety_mask = np.ones((X.shape[0])).astype(bool) # do not mask
zoom_factor = 1.e6
em = emulated.loc[safety_mask, flux_name].values
em_second = emulated_second.loc[safety_mask, flux_name].values
obs = Y.loc[safety_mask, flux_name].values
obs_mag = Y.loc[safety_mask, '%s_mag' %flux_name].values
al_sig = emulated.loc[safety_mask, '%s_al_sig_flux' %flux_name].values
ep_sig = emulated.loc[safety_mask, '%s_ep_sig_flux' %flux_name].values
if display_uncertainty == 'aleatoric':
display_sig = al_sig * zoom_factor
elif display_uncertainty == 'epistemic':
display_sig = ep_sig * zoom_factor
obs_nomask = Y.loc[:, flux_name].values * zoom_factor # for consistent plot scaling
offset = (em - obs) * zoom_factor # Jy - Jy
offset_second = (em_second - obs) * zoom_factor
obs_display = obs * zoom_factor
#truth_mag = X.loc[safety_mask, 'truth_total_mag_%s' %bandpass].values
obs_err = uncertain.get_photometric_error(obs_mag, 'r', run=run, gamma=None, coadd=True) # mag
obs_err = units.delta_flux(flux=obs, delta_mag=obs_err)
obs_err_display = obs_err * zoom_factor
# Sorting necessary for fill_between plots
sorted_id = np.argsort(obs_display)
sorted_obs = obs_display[sorted_id]
sorted_err = obs_err_display[sorted_id]
# Perfect mapping
perfect = np.linspace(np.min(obs_nomask), np.max(obs_nomask), 20)
# Baseline errors
ax.fill_between(sorted_obs, -sorted_err, sorted_err, alpha=0.5, facecolor='tab:orange', label=r'1-$\sigma$ photometric')
# Plot estimated uncertainty
ax.errorbar(obs_display, offset, color='tab:blue', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_1$ 1-$\sigma$ %s' %display_uncertainty)
ax.errorbar(obs_display, offset_second, color='tab:olive', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_2$ 1-$\sigma$ %s' %display_uncertainty)
# Plot perfect mapping
ax.plot(perfect, np.zeros_like(perfect), linestyle='--', color='r', label="Perfect mapping")
#ax.set_ylim([-5, 5])
ax.set_title(flux_name)
ax.set_ylabel('Emulated - Observed (microJy)')
ax.set_xlabel('Observed (microJy)')
ax.set_xlim( [np.min(obs_nomask), np.max(obs_nomask)] )
#plt.yscale('symlog')
ax.set_xscale('symlog')
#print(param_star)
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ epistemic: %.2f (microJy)" %np.mean(ep_sig))
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ aleatoric: %.2f (microJy)" %np.mean(al_sig))
ax.legend(loc=(1.05, 0.5))
canvas.draw()
return canvas
def plot_magnitude(fig, X, Y, emulated, emulated_second, flux_formatting, bandpass, display_uncertainty='aleatoric', run='1.2i', plot_offset=True):
canvas = FigureCanvas(fig)
ax = fig.gca()
flux_name = flux_formatting %bandpass
safety_mask = ~emulated.loc[:, '%s_mag' %flux_name].isnull()
em_mag = emulated.loc[safety_mask, '%s_mag' %flux_name].values
em_mag_second = emulated_second.loc[safety_mask, '%s_mag' %flux_name].values
obs_mag = Y.loc[safety_mask, '%s_mag' %flux_name].values
al_sig_mag = emulated.loc[safety_mask, '%s_al_sig_mag' %flux_name].values
ep_sig_mag = emulated.loc[safety_mask, '%s_ep_sig_mag' %flux_name].values
obs_nomask = Y.loc[:, '%s_mag' %flux_name].values # for consistent plot scaling
obs_nomask = obs_nomask[np.isfinite(obs_nomask)]
if display_uncertainty == 'aleatoric':
display_sig = al_sig_mag
elif display_uncertainty == 'epistemic':
display_sig = ep_sig_mag
offset = (em_mag - obs_mag) # mag - mag
offset_second = (em_mag_second - obs_mag)
#truth_mag = X.loc[safety_mask, 'truth_total_mag_%s' %bandpass].values
obs_err = uncertain.get_photometric_error(obs_mag, 'r', run=run, gamma=None, coadd=True) # mag
# Sorting necessary for fill_between plots
sorted_id = np.argsort(obs_mag)
sorted_obs = obs_mag[sorted_id]
sorted_err = obs_err[sorted_id]
# Perfect mapping
perfect = np.linspace(np.min(obs_nomask), np.max(obs_nomask), 20)
# Baseline errors
ax.fill_between(sorted_obs, -sorted_err, sorted_err, alpha=0.5, facecolor='tab:orange', label=r'1-$\sigma$ photometric')
# Plot estimated uncertainty
ax.errorbar(obs_mag, offset, color='tab:blue', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_1$ 1-$\sigma$ %s' %display_uncertainty)
ax.errorbar(obs_mag, offset_second, color='tab:olive', marker='.', linewidth=0, yerr=display_sig, elinewidth=0.5, label=r'$N_2$ 1-$\sigma$ %s' %display_uncertainty)
# Plot perfect mapping
ax.plot(perfect, np.zeros_like(perfect), linestyle='--', color='r', label="Perfect mapping")
#plt.plot(perfect, perfect, linestyle='--', color='r', label="Perfect mapping")
ax.set_ylim([-5, 5])
ax.set_title(flux_name)
ax.set_ylabel('Emulated - Observed (mag)')
ax.set_xlabel('Observed (mag)')
ax.set_xlim( [np.min(obs_nomask), min(28.0, np.max(obs_nomask))] )
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ epistemic: %.2f (mag)" %np.mean(ep_sig_mag))
ax.plot([], [], ' ', label=r"Avg 1-$\sigma$ aleatoric: %.2f (mag)" %np.mean(al_sig_mag))
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc=(1.05, 0.5))
canvas.draw()
return canvas
def get_natural_units(X, Y, mu, al_sig2, ep_sig2, F, mu_second, al_sig2_second, ep_sig2_second, F2, alpha, mu_class, al_sig2_class, ep_sig2_class, meta):
revert_flux = 1.0/meta['scale_flux']
null_mag_flag = -1
ref_centroid = meta['ref_centroid']
# For broadcasting
X_mean = np.array(meta['X_mean']).reshape(1, -1)
X_std = np.array(meta['X_std']).reshape(1, -1)
# Unstandardize
X = X*X_std + X_mean
# Concat classification results
mu = np.concatenate([mu_class, mu,], axis=1)
mu_second = np.concatenate([mu_class, mu_second,], axis=1)
al_sig2 = np.concatenate([al_sig2_class, al_sig2,], axis=1)
ep_sig2 = np.concatenate([ep_sig2_class, ep_sig2,], axis=1)
al_sig2_second = np.concatenate([al_sig2_class, al_sig2_second,], axis=1)
ep_sig2_second = np.concatenate([ep_sig2_class, ep_sig2_second,], axis=1)
# Dictify
    X = pd.DataFrame(X, index=None, columns=meta['X_cols'])
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
    def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
    def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Feb 17, 2020
# About: strymread class to read CAN data from CSV file captured using
# libpanda (https://jmscslgroup.github.io/libpanda/) or from `strym` class.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# For System and OS level task
import sys, getopt
## General Data processing and visualization Import
import time
import ntpath
import datetime
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (16,8)
plt.rcParams["image.cmap"] = "Dark2"
# to change default color cycle
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Dark2.colors)
from scipy.interpolate import interp1d
from scipy import signal
import pandas as pd # Note that this is not the comma.ai Panda, but the pandas data analysis library
from scipy import integrate
import pickle
import os
from os.path import expanduser
import seaborn as sea
import plotly.express as px
import csv
import copy
import scipy.stats
# cantools import
import cantools
import strym.DBC_Read_Tools as dbc
import pkg_resources
from subprocess import Popen, PIPE
from .utils import configure_logworker
LOGGER = configure_logworker()
dbc_resource = ''
try:
import importlib.resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
# Try backported to PY<37 `importlib_resources`.
print("Python older than 3.7 detected. ")
try:
import importlib_resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
print("importlib_resources not found. Install backported importlib_resources through `pip install importlib-resources`")
import vin_parser as vp
# from sqlalchemy import create_engine
import sqlite3
import matplotlib.colors as colors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
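# Illustrative usage sketch (not part of the original module); the colormap name and
# cut points below are arbitrary examples:
# >>> cropped = truncate_colormap(plt.get_cmap('viridis'), minval=0.2, maxval=0.8)
# >>> plt.scatter([1, 2, 3], [1, 2, 3], c=[0.1, 0.5, 0.9], cmap=cropped)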
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
import plotly.offline as pyo
# Set notebook mode to work in offline
pyo.init_notebook_mode()
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from .config import config
class strymread:
'''
`strymread` reads the logged CAN data from the given CSV file.
This class provides several utilities functions
Parameters
----------------
csvfile: `str`, `pandas.DataFrame`, default = None
The CSV file to be read. If `pandas.DataFrame` is supplied, then csvfile is set to None
PandasDataFrame, if provided, must have columns ["Time", "Message", "MessageID", "Bus"]
dbcfile: `str`, default = ""
The DBC file which will provide codec for decoding CAN messages
kwargs: variable list of argument in the dictionary format
bus: `list` | default = None
A list of integer correspond to Bus ID.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if dbcfile='' or dbcfile = None
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in the dbc folder of the package where we ship sample dbc file to work with.
verbose: `bool`
Option for verbosity, prints some information when True
createdb: `bool`
If True, creates a sqlite3 database for raw CAN data if the database doesn't exist
dbdir: `str`
Optional argument that specifies where sqlite3 database will be stored.
The default location is `~/.strym/`
Attributes
---------------
dbcfile: `str`, default = ""
The filepath of DBC file
csvfile:`str` | `pandas.DataFrame`
The filepath of CSV Data file, or, raw CAN Message DataFrame
dataframe: `pandas.Dataframe`
Pandas dataframe that stores content of csvfile as dataframe
dataframe_raw: `pandas.Dataframe`
Pandas original dataframe with all bus IDs. When `bus=` is passed to the constructor to filter out the dataframe based on bus id, the original dataframe is saved
in dataframe_raw
candb: `cantools.db`
CAN database fetched from DBC file
burst: `bool`
A boolean flag that checks if CAN data came in burst. If `True`, then CAN Data was captured in burst, else
`False`. If CAN Data came in burst (as in say 64 messages at a time or so)
then any further analysis might not be reliable. Always check that.
success: `bool`
A boolean flag, if `True`, tells that reading of CSV file was successful.
bus: `list` | default = None
A list of integer correspond to Bus ID.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if `dbcfile=""` or `dbcfile = None`
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in package's dbcfolder
where we ship sample dbc file to work with.
dbdir:`str`
Location of database where sqlite3 database for CAN Dataframe will stored.
Default location: `~/.strym/`
database: `str`
The name of the database corresponding to the model/make of the vehicle from which the CAN data
was captured
inferred_dbc: `str`
DBC file inferred from the name of the csvfile passed.
Returns
---------------
`strymread`
Returns an object of type `strymread` upon successful reading or else return None
Example
----------------
>>> import strym
>>> from strym import strymread
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> dbcfile = 'newToyotacode.dbc'
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymread(csvfile=csvdata, dbcfile=dbcfile)
'''
sunset = truncate_colormap(plt.get_cmap('magma'), 0.0, 0.7) # truncated color map from magma
def __init__(self, csvfile, dbcfile = "", **kwargs):
# success attributes will be set to True ultimately if everything goes well and csvfile is read successfully
self.success = False
if csvfile is None:
print("csvfile is None. Unable to proceed with further analysis. See https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
if isinstance(csvfile, pd.DataFrame):
self.dataframe = csvfile
self.csvfile = ''
if ((len(dbcfile) == 0) or (dbcfile is None)):
print("Please provide a valid dbcfile using argument `dbcfile` to strymread if you intend to supply a dataframe to strymread")
return
elif isinstance(csvfile, str):
# Check if file exists
if not os.path.exists(csvfile):
print("Provided csvfile: {} doesn't exist, or read permission error".format(csvfile))
return
# if file size is less than 60 bytes, return without processing
if os.path.getsize(csvfile) < 60:
print("Nothing significant to read in {}. No further analysis is warranted.".format(csvfile))
return
self.csvfile = csvfile
self.basefile = ntpath.basename(csvfile)
else:
print("Unsupported type for csvfile. Please see https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
# Optional argument for verbosity
self.verbose = kwargs.get("verbose", False)
# Optional argument for bus ID
self.bus = kwargs.get("bus", None)
# Optional argument for dbcfolder where to look for dbc files
self.dbcfolder = kwargs.get("dbcfolder", None)
# Optional argument to tell strymread whether to create a table of the raw CAN data in the db
self.createdb = kwargs.get("createdb", False)
default_db_dir = expanduser("~") + "/.strym/"
# Optional argument for where TIMESERIES DB will be saved
self.dbdir = kwargs.get("dbdir", default_db_dir)
if not os.path.exists(self.dbdir):
if self.verbose:
print("The directory {} for timeseries db doesn't exist, creating one".format(self.dbdir ))
try:
os.mkdir(self.dbdir)
except OSError as error:
print(error)
# If a single bus ID is passed, convert it to a list of one item; if multiple bus IDs
# need to be passed, they must be passed as a list
if isinstance(self.bus, int):
self.bus = [self.bus]
# If data were recorded in burst then the burst attribute will be set to True. In a practical scenario, we won't proceed
# with further analysis when data comes in burst; however, if the csvfile has data in burst, no real error will be raised. It
# will be up to the user to check the boolean attribute for True/False
self.burst = False
if len(self.csvfile) > 0:
# All CAN messages will be saved as pandas dataframe
try:
# Get the number of rows using Unix `wc` word count function
is_windows = sys.platform.startswith('win')
if not is_windows:
word_counts = Popen(['wc', '-l', self.csvfile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = word_counts.communicate()
output = output.decode("utf-8")
output = output.strip()
output = output.split(' ')
n_lines = int(output[0])
if n_lines < 5:
print("Not enough data to read in the provided csvfile {}".format(ntpath.basename(self.csvfile)))
return
self.dataframe = pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, nrows=n_lines - 2)
else:
self.dataframe = pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, skipfooter=2)
except pd.errors.ParserError:
print("Ill-formated CSV File. A properly formatted CAN-data CSV file must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("No data was written the csvfile. Unable to perform further operation")
return
except UnicodeDecodeError:
print("Ill-formated CSV File. A properly formatted CAN-data CSV file must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("No data was written to the csvfile. Unable to perform further operation")
return
except pd.errors.EmptyDataError:
print("CSVfile is empty.")
return
if self.dataframe.shape[0] == 0:
print("No data was present in the csvfile or pandas dataframe supplied is empty. Unable to perform further operation")
return
self.dataframe = self.dataframe.dropna()
if set(['Time', 'MessageID', 'Message', 'Bus']).issubset(self.dataframe.columns) == False:
print("Ill-formated CSV File or pandas dataframe. A properly formatted CAN-data CSV file/dataframe must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("Unable to perform further operation")
return
if np.any(np.diff(self.dataframe['Time'].values) < 0.0):
print("Warning: Timestamps are not monotonically increasing. Further analysis is not recommended.")
return
def vin(csvfile):
"""
returns the vehicle identification number, VIN, (if detected) from the filename
uses a very very simple method of looking for a 17 char string near the end of the filename
Parameters
--------------
csvfile: `str`
Parse VIN number from the name of the `csvfile`
"""
# we use underscores to split up the filename
splits = csvfile.split('_')
candidates = []
# print(f'The splits of the file are {splits}')
for split in splits:
# all VIN are 17 chars long
if len(split) == 17:
# keep them in an array, in case the path has other 17 char elements
candidates.append(split)
if len(candidates) >= 1:
# return the end element, as none of our fileendings has 17 char elements at this time
# HACK: if folks create _some17charfileending.csv then this will fail
return candidates[-1]
else:
return 'VIN not part of filename'
vin = vin(self.csvfile)
brand = "toyota"
model = "rav4"
year = "2019"
try:
if vp.check_valid(vin) == True:
brand = vp.manuf(vin)
brand = brand.split(" ")[0].lower()
try:
model = vp.online_parse(vin)['Model'].lower()
except ConnectionError as e:
print("Retrieving model of the vehicle requires internet connection. Check your connection.")
return
year = str(vp.year(vin))
LOGGER.info("Vehicle model infered is {}-{}-{}".format(brand, model, year))
except:
if self.verbose:
print('No valid vin... Continuing as Toyota RAV4. If this is inaccurate, please append VIN number to csvfile prefixed with an underscore.')
self.inferred_dbc = "{}_{}_{}.dbc".format(brand, model, year)
if (dbcfile is None) or(dbcfile==""):
dbcfile = str(dbc_resource) + "/" + self.inferred_dbc
if not os.path.exists(dbcfile):
print("The dbcfile: {} doesn't exist, or read permission error".format(dbcfile))
return
# if control comes to the point, then the reading of CSV file was successful
self.success = True
self.dataframe = self.timeindex(self.dataframe, inplace=True)
self.dataframe_raw = None
if self.bus is not None:
if not np.all(np.isin(self.bus, self.dataframe['Bus'].unique())):
print("One of the bus id not available.")
print("Available BUS IDs are {}".format(self.dataframe['Bus'].unique()))
self.success = False
return
else:
self.dataframe_raw = self.dataframe.copy(deep = True)
self.dataframe = self.dataframe[self.dataframe['Bus'].isin(self.bus)]
# Check if data came in burst
T = self.dataframe['Time'].diff()
T_head = T[1:64]
if np.mean(T_head) == 0.0:
self.burst = True
# DBC file that has CAN message codec
self.dbcfile = dbcfile
# save the CAN database for later use
if self.dbcfile:
self.candb = cantools.db.load_file(self.dbcfile)
else:
self.candb = None
# initialize the dbc lookups for any particular usage
# this creates the dict later used to figure out which signals/msgs to
# use when decoding these data
self._dbc_init_dict()
# We will create an SQLite DB based on VIN number
self.database = brand.upper() + '_' + model.upper() + '_' + year.upper() + ".db"
self.raw_table = "RAW_CAN"
self.db_location = '{}{}'.format(self.dbdir, self.database)
if self.createdb:
dbconnection = self.dbconnect(self.db_location)
cursor = dbconnection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS {} (Clock TIMESTAMP, Time REAL NOT NULL, Bus INTEGER, MessageID INTEGER, Message TEXT, MessageLength INTEGER, PRIMARY KEY (Clock, Bus, MessageID, Message));'.format(self.raw_table))
dbconnection.commit()
try:
self.dataframe[['Time', 'Bus', 'MessageID', 'Message', 'MessageLength']].to_sql(self.raw_table, con=dbconnection, index=True, if_exists='append')
except sqlite3.IntegrityError as e:
print(e)
if self.verbose:
print("Attempted to insert duplicate entries to the RAW_CAN table.\nRAW_CAN table has (Clock, Bus, MessageID, Message) composite primary key.")
def dbconnect(self, db_location):
"""
Creates dbconnection and returns db connection object
Parameters
------------
db_location: `str`
sqlite db url
"""
dbconnection = None
try:
dbconnection = sqlite3.connect(db_location)
except sqlite3.Error as e:
print(e)
# dbengine = create_engine(db_location, echo = self.verbose )
# dbengine.connect()
# dbconnection = self.dbengine.raw_connection()
return dbconnection
def _set_dbc(self):
'''
`_set_dbc` sets the DBC file
'''
self.dbcfile = input('DBC file unspecified. Enter the filepath of the DBC file: ')
if self.dbcfile:
try:
self.dbcfile = str(self.dbcfile)
print("The new DBC file entered is: {}".format(self.dbcfile))
except ValueError:
print('DBC file entered is not a string')
raise
self.candb = cantools.db.load_file(self.dbcfile)
def get_ts(self, msg, signal, verbose=False):
'''
`get_ts` returns time series data for the given message `msg` and signal `signal`
Parameters
-------------
msg: `string` | `int`
A valid message that can be found in the given DBC file. Can be specified as message name or message ID
signal: `string` | `int`
A valid signal in string format corresponding to `msg_name` that can be found in the given DBC file. Can be specified as signal name or signal ID
verbose: `bool`, default = False
If True, print some information
'''
if not self.dbcfile:
self._set_dbc()
assert(isinstance(msg, int) or isinstance(msg, str)), ("Only Integer message ID or string name is supported for msg_name")
assert(isinstance(signal, int) or isinstance(signal, str)), ("Only Integer signal ID or string name is supported for signal_name")
if isinstance(msg, int):
msg = dbc.getMessageName(msg, self.candb)
if verbose:
print("Message Name: {}".format(msg))
if isinstance(signal, int):
signal = dbc.getSignalName(msg, signal, self.candb)
if verbose:
print("Signal Name: {}\n".format(signal))
# the try-except below is a fix for the hybrid RAV4: if you are using data
# from a hybrid, the accel message length is 4 bytes vs. 8 on the internal combustion engine variant
ts = pd.DataFrame(columns = ["Time", "Message"])
try:
ts = dbc.convertData(msg, signal, self.dataframe, self.candb)
except ValueError as e:
if (isinstance(msg, int) and msg == 552) or (isinstance(msg, str) and msg == 'ACCELEROMETER'):
if 'Short' in str(e):
LOGGER.info('Found RAV4 where acceleration messages are 4 bytes.')
# accel_def = self.candb.get_message_by_name('ACCELEROMETER')
# index_of_acceldef = 0
# for i, m in enumerate(self.candb.messages):
# if m == accel_def:
# index_of_acceldef = i
# break
# accel_def.length = 4
# self.candb.messages[index_of_acceldef] = accel_def
self.dataframe = dbc.CleanData(self.dataframe,address=552)
ts = dbc.convertData(msg, signal, self.dataframe, self.candb)
return ts
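# Illustrative usage sketch: assumes the inferred DBC defines a message named 'SPEED'
# whose signal with ID 1 is the vehicle speed; adjust msg/signal to your own DBC.
# >>> r0 = strymread(csvfile='2020-03-20.csv', dbcfile='newToyotacode.dbc')
# >>> speed = r0.get_ts(msg='SPEED', signal=1)
# >>> speed.plot(x='Time', y='Message')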
def messageIDs(self):
'''
Retrieves a list of all message IDs available in the given CSV-formatted CAN data file.
Returns
---------
`list`
A python list of all available message IDs in the given CSV-formatted CAN data file.
'''
msgIDs = self.dataframe['MessageID'].unique()
msgIDs.sort()
return msgIDs
def count(self, plot = False):
'''
A utility function to return and optionally plot the counts for each Message ID as bar graph
Returns
----------
`pandas.DataFrame`
A pandas DataFrame with total message counts per Message ID and total count by Bus
Example
---------
>>> import strym
>>> from strym import strymread
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> dbcfile = 'newToyotacode.dbc'
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymread(csvfile=csvdata, dbcfile=dbcfile)
>>> r0.count()
'''
dataframe = self.dataframe
if plot:
r1 = dataframe[dataframe['MessageID'] <=200]
r2 = dataframe[(dataframe['MessageID'] >200) & (dataframe['MessageID'] <= 400)]
r3 = dataframe[(dataframe['MessageID'] >400) & (dataframe['MessageID'] <= 600)]
r4 = dataframe[(dataframe['MessageID'] >600) & (dataframe['MessageID'] <= 800)]
r5 = dataframe[(dataframe['MessageID'] >800) & (dataframe['MessageID'] <= 1000)]
r6 = dataframe[(dataframe['MessageID'] >1000) & (dataframe['MessageID'] <= 1200)]
r7 = dataframe[(dataframe['MessageID'] >1200) & (dataframe['MessageID'] <= 1400)]
r8 = dataframe[(dataframe['MessageID'] >1400) ]
r_df = [r1, r2, r3, r4, r5, r6, r7, r8]
self._setplots(ncols=2, nrows=4)
fig, axes = self.create_fig(ncols=2, nrows=4)
plt.rcParams['figure.figsize'] = (16, 8)
fig.tight_layout(pad=5.0)
ax = axes.ravel()
for i in range(0, 8):
cnt = r_df[i]['MessageID'].value_counts()
cnt = cnt.sort_index(ascending=True)
if cnt.empty:
continue
cnt.plot(kind='bar', ax=ax[i])
ax[i].tick_params(axis="x")
ax[i].tick_params(axis="y")
fig.suptitle("Message ID counts: "+ ntpath.basename(self.csvfile), y=0.98)
fig.show()
bus = dataframe['Bus'].unique()
bus.sort()
columns = ['Counts_Bus_' + str(int(s)) for s in bus]
columns.insert(0, 'MessageID')
all_msgs = self.messageIDs()
dfx = pd.DataFrame(columns=columns)
import requests
import pandas as pd
import json
import networkx as nx
BASE_URL = "https://api.nb.no/dhlab"
BASE_URL1 = "https://api.nb.no/dhlab"
NGRAM_API = "https://api.nb.no/dhlab/nb_ngram/ngram/query"
GALAXY_API = "https://api.nb.no/dhlab/nb_ngram_galaxies/galaxies/query"
pd.options.display.max_rows = 100
MAX_CORPUS = 14300 # check this value - concordance does not work with larger corpora
import re
# convert cell to a link
def make_link(row):
r = "<a target='_blank' href = 'https://urn.nb.no/{x}'>{x}</a>".format(x = str(row))
return r
# find hits a cell
find_hits = lambda x: ' '.join(re.findall("<b>(.+?)</b", x))
# fetch metadata
def get_metadata(urns = None):
""" Fetch metadata from a list of urns """
params = locals()
r = requests.post(f"{BASE_URL}/get_metadata", json = params)
return pd.DataFrame(r.json())
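# Illustrative sketch (the URN below is a placeholder, not a real identifier):
# >>> meta = get_metadata(urns=['URN:NBN:no-nb_digibok_XXXXXXXXXXXXX'])
# >>> meta.head()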
# class for displaying concordances
class Concordance:
"""Wrapper for concordance function with added functionality"""
def __init__(self, corpus, query):
self.concordance = concordance(
urns = list(
corpus.urn.sample(min(MAX_CORPUS, len(corpus.urn)))
),
words = query)
self.concordance['link'] = self.concordance.urn.apply(make_link)
self.concordance = self.concordance[['link', 'urn', 'conc']]
self.concordance.columns = ['link', 'urn', 'concordance']
self.corpus = corpus
self.size = len(self.concordance)
def show(self, n = 10, style = True):
if style:
result = self.concordance.sample(min(n, self.size))[['link', 'concordance']].style
else:
result = self.concordance.sample(min(n, self.size))
return result
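# Illustrative sketch of how Concordance is combined with document_corpus() defined
# further down; the doctype, years and fts5 query string are arbitrary examples:
# >>> corpus = document_corpus(doctype='digibok', from_year=1990, to_year=2000, limit=100)
# >>> conc = Concordance(corpus, '"demokrati"')
# >>> conc.show(n=5)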
class Cooccurence():
"""Collocations """
def __init__(self, corpus = None, words = None, before = 10, after = 10, reference = None):
if isinstance(words, str):
words = [words]
coll = pd.concat([urn_collocation(urns = list(corpus.urn), word = w, before = before, after = after) for w in words])[['counts']]
self.coll = coll.groupby(coll.index).sum()
self.reference = reference
self.before = before
self.after = after
if reference is not None:
self.coll['relevance'] = (self.coll.counts/self.coll.counts.sum())/(self.reference.freq/self.reference.freq.sum())
def show(self, sortby = 'counts', n = 20):
return self.coll.sort_values(by = sortby, ascending = False)
def keywordlist(self, top = 200, counts = 5, relevance = 10):
mask = self.coll[self.coll.counts > counts]
mask = mask[mask.relevance > relevance]
return list(mask.sort_values(by = 'counts', ascending = False).head(top).index)
def find(self, words):
return self.coll.loc[[w for w in words if w in self.coll.index]]
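# Illustrative sketch; the corpus filters, target word and window sizes are arbitrary
# examples, and the reference frequency list comes from get_reference() defined below:
# >>> corpus = document_corpus(doctype='digibok', from_year=1990, to_year=2000, limit=100)
# >>> ref = get_reference(corpus='digavis', from_year=1990, to_year=2000)
# >>> cooc = Cooccurence(corpus=corpus, words='frihet', before=10, after=10, reference=ref)
# >>> cooc.show(sortby='relevance', n=20)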
class Ngram():
def __init__(self, words = None, from_year = None, to_year = None, doctype = None, lang = 'nob'):
from datetime import datetime
self.date = datetime.now()
if to_year is None:
to_year = self.date.year
if from_year is None:
from_year = 1950
self.from_year = from_year
self.to_year = to_year
self.words = words
self.lang = lang
if not doctype is None:
if 'bok' in doctype:
doctype = 'bok'
elif 'avis' in doctype:
doctype = 'avis'
else:
doctype = 'bok'
else:
doctype = 'bok'
ngrm = nb_ngram(terms = ', '.join(words), corpus = doctype, years = (from_year, to_year))
ngrm.index = ngrm.index.astype(str)
self.ngram = ngrm
return None
def plot(self, **kwargs):
self.ngram.plot(**kwargs)
def compare(self, another_ngram):
from datetime import datetime
start_year = max(datetime(self.from_year,1,1), datetime(another_ngram.from_year,1,1)).year
end_year = min(datetime(self.to_year,1,1), datetime(another_ngram.to_year,1,1)).year
compare = (self.ngram.loc[str(start_year):str(end_year)].transpose()/another_ngram.ngram[str(start_year):str(end_year)].transpose().sum()).transpose()
return compare
class Ngram_book(Ngram):
""""""
def __init__(self, words = None, title = None, publisher = None, city = None, lang = 'nob', from_year = None, to_year = None, ddk = None, subject = None):
from datetime import datetime
self.date = datetime.now()
if to_year is None:
to_year = self.date.year
if from_year is None:
from_year = 1950
self.from_year = from_year
self.to_year = to_year
self.words = words
self.title = title
self.publisher = publisher
self.city = city
self.lang = lang
self.ddk = ddk
self.subject = subject
self.ngram = ngram_book(word = words, title = title, publisher = publisher, lang = lang,city = city, period = (from_year, to_year), ddk = ddk, topic = subject)
#self.cohort = (self.ngram.transpose()/self.ngram.transpose().sum()).transpose()
return None
class Ngram_news(Ngram):
def __init__(self, words = None, title = None, city = None, from_year = None, to_year = None):
from datetime import datetime
self.date = datetime.now()
if to_year is None:
to_year = self.date.year
if from_year is None:
from_year = 1950
self.from_year = from_year
self.to_year = to_year
self.words = words
self.title = title
self.ngram = ngram_news(word = words, title = title, period = (from_year, to_year))
#self.cohort = (self.ngram.transpose()/self.ngram.transpose().sum()).transpose()
return None
def get_reference(corpus = 'digavis', from_year = 1950, to_year = 1955, lang = 'nob', limit = 100000):
params = locals()
r = requests.get(BASE_URL + "/reference_corpus", params = params)
if r.status_code == 200:
result = r.json()
else:
result = []
return pd.DataFrame(result, columns = ['word', 'freq']).set_index('word')
def find_urns(docids = None, mode = 'json'):
""" Return a list of URNs from a list of docids as a dictionary {docid: URN} or as a pandas dataframe"""
params = locals()
r = requests.post(BASE_URL1 + "/find_urn", json = params)
if r.status_code == 200:
res = pd.DataFrame.from_dict(r.json(), orient = 'index', columns = ['urn'])
else:
res = pd.DataFrame()
return res
def ngram_book(word = ['.'], title = None, period = None, publisher = None, lang=None, city = None, ddk = None, topic = None):
"""Get a time series for a word as string, title is name of book period is (year, year), lang is three letter iso code.
Use % as wildcard where appropriate - no wildcards in word and lang"""
params = locals()
if isinstance(word, str):
# assume a comma separated string
word = [w.strip() for w in word.split(',')]
params['word'] = tuple(word)
params = {x:params[x] for x in params if not params[x] is None}
r = requests.post(BASE_URL1 + "/ngram_book", json = params)
#print(r.status_code)
df = pd.DataFrame.from_dict(r.json(), orient = 'index')
df.index = df.index.map(lambda x: tuple(x.split()))
columns = df.index.levels[0]
df = pd.concat([df.loc[x] for x in columns], axis = 1)
df.columns = columns
#df.index = df.index.map(pd.Timestamp)
return df
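# Illustrative sketch (the word list, period and language are arbitrary examples):
# >>> freqs = ngram_book(word=['demokrati', 'frihet'], period=(1950, 2000), lang='nob')
# >>> freqs.plot()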
def ngram_periodicals(word = ['.'], title = None, period = None, publisher = None, lang=None, city = None, ddk = None, topic = None):
"""Get a time series for a word as string, title is name of periodical period is (year, year), lang is three letter iso code.
Use % as wildcard where appropriate - no wildcards in word and lang"""
params = locals()
if isinstance(word, str):
# assume a comma separated string
word = [w.strip() for w in word.split(',')]
params['word'] = tuple(word)
params = {x:params[x] for x in params if not params[x] is None}
r = requests.post(BASE_URL1 + "/ngram_periodicals", json = params)
#print(r.status_code)
df = pd.DataFrame.from_dict(r.json(), orient = 'index')
df.index = df.index.map(lambda x: tuple(x.split()))
columns = df.index.levels[0]
df = pd.concat([df.loc[x] for x in columns], axis = 1)
df.columns = columns
#df.index = df.index.map(pd.Timestamp)
return df
def ngram_news(word = ['.'], title = None, period = None):
""" get a time series period is a tuple of (year, year), (yearmonthday, yearmonthday)
word is string and title is the title of newspaper, use % as wildcard"""
params = locals()
if isinstance(word, str):
# assume a comma separated string
word = [w.strip() for w in word.split(',')]
params['word'] = tuple(word)
params = {x:params[x] for x in params if not params[x] is None}
r = requests.post(BASE_URL1 + "/ngram_newspapers", json = params)
#print(r.status_code)
df = pd.DataFrame.from_dict(r.json(), orient = 'index')
df.index = df.index.map(lambda x: tuple(x.split()))
columns = df.index.levels[0]
df = pd.concat([df.loc[x] for x in columns], axis = 1)
df.columns = columns
#df.index = df.index.map(pd.Timestamp)
return df
def get_document_frequencies(urns = None, cutoff = 0, words = None):
params = locals()
r = requests.post(BASE_URL1 + "/frequencies", json = params)
result = r.json()
structure = {u[0][0] : dict([tuple(x[1:3]) for x in u]) for u in result if u != []}
df = pd.DataFrame(structure)
return df.sort_values(by = df.columns[0], ascending = False)
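# Illustrative sketch: build a corpus first (see document_corpus below), then fetch the
# per-document term frequencies for its URNs; the cutoff value is an arbitrary example:
# >>> corpus = document_corpus(doctype='digibok', from_year=1990, to_year=1995, limit=20)
# >>> freq_matrix = get_document_frequencies(urns=list(corpus.urn), cutoff=5)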
def get_word_frequencies(urns = None, cutoff = 0, words = None):
params = locals()
r = requests.post(BASE_URL1 + "/frequencies", json = params)
result = r.json()
structure = {u[0][0] : dict([(x[1],x[2]/x[3]) for x in u]) for u in result if u != []}
df = pd.DataFrame(structure)
return df.sort_values(by = df.columns[0], ascending = False)
def get_document_corpus(**kwargs):
return document_corpus(**kwargs)
def document_corpus(doctype = None, author = None, freetext = None, from_year = None, to_year = None, from_timestamp = None, to_timestamp = None, title = None, ddk = None, subject = None, lang = None, limit = None):
""" Fetch a corpus based on metadata - doctypes are digibok, digavis, digitidsskrift"""
parms = locals()
params = {x:parms[x] for x in parms if not parms[x] is None }
if "ddk" in params:
params["ddk"] = "^" + params['ddk'].replace('.', '"."')
r=requests.post(BASE_URL + "/build_corpus", json=params)
return pd.DataFrame(r.json())
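# Illustrative sketch (the metadata filters are arbitrary examples; doctype is one of
# digibok, digavis, digitidsskrift as noted in the docstring):
# >>> corpus = document_corpus(doctype='digavis', freetext='Aftenposten', from_year=1970, to_year=1975, limit=500)
# >>> corpus.head()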
def urn_collocation(urns = None, word = 'arbeid', before = 5, after = 0, samplesize = 200000):
""" Create a collocation from a list of URNs - returns distance (sum of distances and bayesian distance) and frequency"""
params = {
'urn': urns,
'word': word,
'before': before,
'after': after,
'samplesize': samplesize
}
r = requests.post(BASE_URL1 + "/urncolldist_urn", json = params)
return pd.read_json(r.text)
def totals(n = 50000):
""" Get total frequencies of words in database"""
r = requests.get(BASE_URL + "/totals/{n}".format(n = n))
return pd.DataFrame.from_dict(dict(r.json()),orient = 'index', columns = ['freq'])
def concordance(urns = None, words = None, window = 25, limit = 100):
""" Get a list of concordances from database, words is an fts5 string search expression"""
if words is None:
return {}
else:
params = {
'urns': urns,
'query': words,
'window': window,
'limit': limit
}
r = requests.post(BASE_URL + "/conc", json = params)
return pd.DataFrame(r.json())
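# Illustrative sketch (the fts5 query string is an arbitrary example):
# >>> corpus = document_corpus(doctype='digibok', from_year=1990, to_year=2000, limit=100)
# >>> hits = concordance(urns=list(corpus.urn), words='"katt" OR "hund"', window=25, limit=50)
# >>> hits.head()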
def concordance_counts(urns = None, words = None, window = 25, limit = 100):
""" Get a list of concordances from database, words is an fts5 string search expression"""
if words is None:
return {}
else:
params = {
'urns': urns,
'query': words,
'window': window,
'limit': limit
}
r = requests.post(BASE_URL + "/conccount", json = params)
return pd.DataFrame(r.json())
def konkordans(urns = None, query = None, window = 25, limit = 100):
if query is None:
return {}
else:
params = {
'urns': urns,
'query': query,
'window': window,
'limit': limit
}
r = requests.post(BASE_URL + "/conc", json = params)
return pd.DataFrame(r.json())
def collocation(corpusquery = 'norge', word = 'arbeid', before = 5, after = 0):
params = {
'metadata_query': corpusquery,
'word': word,
'before': before,
'after': after
}
r = requests.post(BASE_URL1 + "/urncolldist", json = params)
return pd.read_json(r.text)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
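# A more compact equivalent of the five assignments above (sketch): loop over the
# frames so the fill is applied uniformly.
# >>> for frame in (df_2010_2011, df_2012_2013, df_2014_2015, df_2016_2017, df_2018_2019):
# ...     frame['prcab'] = frame['prcab'].fillna(0)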
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
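# Alternative sketch: the per-year split above can also be expressed by concatenating
# the frames and grouping on surgyear, which avoids one mask variable per file.
# >>> frames = pd.concat([df_2010_2011, df_2012_2013, df_2014_2015, df_2016_2017, df_2018_2019])
# >>> by_year = {year: grp for year, grp in frames.groupby('surgyear')}
# >>> df_2010 = by_year[2010]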
avg_siteid = pd.DataFrame()
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
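# mark every fifth label as missing (-1 codes become NaN via fastpath)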
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the new category is already present in the old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the category to remove is not present in the categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
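# larger random round-trip: values (including NaNs) must survive unchanged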
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# when ordered=False, unique() keeps the order of appearance and
# reorders the result's categories to match
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan stays in the unique values but never becomes a category
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
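# code 3 points at np.nan in the categories; -1 marks the value that was
# already missing before np.nan became a category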
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
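# nbytes is the size of the codes array plus the categories' values array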
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of the single item array
# Categorical returns an np.array, like pd.Series does, which differs
# from np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
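# the codes dtype widens (int8 -> int16 -> int32) as the number of
# categories grows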
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
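# unstacking a 'category' column should yield categorical columns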
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' and '.remove_unused_categories()'
# to the categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# set_categories on the Series itself is easily confused with the
# .cat accessor method, so test that it raises an error on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
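# both the raw Categorical values and the Series assign as category dtype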
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
| tm.assert_frame_equal(df, exp_parts_cats_col) | pandas.util.testing.assert_frame_equal |
__author__ = 'lucabasa'
__version__ = 1.0
__status__ = "development"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import gc
from sklearn.metrics import mean_squared_error, log_loss
import lightgbm as lgb
from sklearn.model_selection import KFold, StratifiedKFold
from utilities import reduce_mem_usage
def read_data(input_file):
df = pd.read_csv(input_file)
df['first_active_month'] = | pd.to_datetime(df['first_active_month']) | pandas.to_datetime |
# 1. Use Linear Regressor class in TensorFlow to predict median housing price, at the granularity of city blocks,
# based on one input feature.
# 2. Evaluate the accuracy of a model's predictions using Root Mean Squared Error (RMSE).
# 3. Improve the accuracy of a model by tuning its hyperparameters.
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
"""
Step 1: Setup
"""
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv",
sep=","
)
# Randomize the data, to be sure not to get any pathological ordering effects that might harm the performance of
# Stochastic Gradient Descent. Additionally, we'll scale `median_house_value` to be in units of thousands, so it can be
# learned a little more easily with learning rates in a range that we usually use.
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
california_housing_dataframe["median_house_value"] /= 1000.0
california_housing_dataframe
"""
Step 2: Examine Data
"""
# Print out statistics on each column: count of examples, mean, standard deviation, max, min, and various quantiles.
california_housing_dataframe.describe()
"""
Step 3: Build First Model
"""
# Predict `median_house_value`, which will be our label. Use `total_rooms` as our input feature.
# To train the model, we'll use the LinearRegressor interface provided by the TensorFlow Estimator API.
"""
Step 3.1: Define Features and Configure Feature Columns
"""
# In order to import training data into TensorFlow, need to specify what type of data each feature contains.
# There are two main types of data we'll use:
# 1. Categorical Data: Data that is textual. The housing data set does not contain any categorical features, but
# examples would be the home style or the words in a real-estate ad.
# 2. Numerical Data: Data that is a number and that you want to treat as a number.
#
# In TensorFlow, we indicate a feature's data type using a construct called a feature column. Feature columns store only
# a description of the feature data; they do not contain the feature data itself.
#
# To start, we're going to use one numeric input feature, `total_rooms`. The following code pulls `total_rooms` data
# from `california_housing_dataframe` and defines the feature column using `numeric_column`, which specifies its data is
# numeric:
# Define the input feature: total_rooms.
my_feature = california_housing_dataframe[["total_rooms"]]
# Configure a numeric feature column for total_rooms.
feature_columns = [tf.feature_column.numeric_column("total_rooms")]
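# Illustrative aside (not used in this exercise): if the data did include a categorical feature such as home style,
# it would be declared with a categorical feature column instead of a numeric one. The feature name and vocabulary
# below are invented purely for illustration.
_example_home_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
    key="home_style", vocabulary_list=["ranch", "bungalow", "colonial"])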
"""
Step 3.2: Define the Target
"""
# Next, we'll define our target, which is `median_house_value`.
# Define the label.
targets = california_housing_dataframe["median_house_value"]
"""
Step 3.3: Configure the Linear Regressor
"""
# Next, configure a linear regression model using Linear Regressor. Train this model using `GradientDescentOptimizer`,
# which implements Mini-Batch Stochastic Gradient Descent (SGD). `learning_rate` controls the size of gradient step.
# Use gradient descent as the optimizer for training the model.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
# Configure the linear regression model with our feature columns and optimizer.
# Set a learning rate of 0.0000001 for Gradient Descent.
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
"""
Step 3.4: Define the Input Function
"""
# To import the California housing data into the LinearRegressor, we need to define an input function, which
# instructs TensorFlow how to preprocess the data, as well as how to batch, shuffle, and repeat it during model training.
# First, convert pandas feature data into a dict of NumPy arrays. Then use TensorFlow Dataset API to construct a
# dataset object from data, and then break data into batches of `batch_size`, to be repeated for specified number of
# epochs (num_epochs).
# NOTE: When default value of `num_epochs=None` is passed to `repeat()`, the input data will be repeated indefinitely.
# Next, if `shuffle` is set to `True`, we'll shuffle data so that it's passed to the model randomly during training.
# The `buffer_size` argument specifies the size of dataset from which `shuffle` will randomly sample.
# Finally, input function constructs an iterator for dataset and returns next batch of data to Linear Regressor.
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a linear regression model of one feature.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(buffer_size=10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
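# Note that my_input_fn returns TensorFlow tensors rather than NumPy arrays: each time they are evaluated during
# training, they yield the next batch of data. For intuition, a call such as
#     features_batch, labels_batch = my_input_fn(my_feature, targets, batch_size=2)
# (shown here for illustration only, not executed) would feed two shuffled examples per training step, indefinitely.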
"""
Step 3.5: Train Model
"""
# Now call `train()` on `linear_regressor` to train the model. Wrap `my_input_fn` in a `lambda` so we can pass in
# `my_feature` and `targets` as arguments, and to start, we'll train for 100 steps.
_ = linear_regressor.train(
input_fn=lambda: my_input_fn(my_feature, targets),
steps=100
)
"""
Step 3.6: Evaluate Model
"""
# Create an input function for predictions.
prediction_input_fn = lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)
# Call predict() on the linear_regressor to make predictions.
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
# Format predictions as a NumPy array, so we can calculate error metrics.
predictions = np.array([item['predictions'][0] for item in predictions])
# Print Mean Squared Error and Root Mean Squared Error.
mean_squared_error = metrics.mean_squared_error(predictions, targets)
root_mean_squared_error = math.sqrt(mean_squared_error)
print("Mean Squared Error (on training data): %0.3f" % mean_squared_error)
print("Root Mean Squared Error (on training data): %0.3f" % root_mean_squared_error)
# Mean Squared Error (MSE) can be hard to interpret, so we often look at Root Mean Squared Error (RMSE) instead;
# a nice property of RMSE is that it can be interpreted on the same scale as the original targets.
min_house_value = california_housing_dataframe["median_house_value"].min()
max_house_value = california_housing_dataframe["median_house_value"].max()
min_max_difference = max_house_value - min_house_value
print("Min. Median House Value: %0.3f" % min_house_value)
print("Max. Median House Value: %0.3f" % max_house_value)
print("Difference between Min. and Max.: %0.3f" % min_max_difference)
print("Root Mean Squared Error: %0.3f" % root_mean_squared_error)
# Now, to improve the result, start by examining how well the predictions match the targets.
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = pd.Series(targets)
calibration_data.describe()
# Can also visualize data and the line we've learned. Recall linear regression on a single feature can be drawn as
# a line mapping input x to output y.
# First, get a uniform random sample of the data so we can make a readable scatter plot.
sample = california_housing_dataframe.sample(n=300)
# Next, plot the line we've learned, drawing from the model's bias term and feature weight, together with the scatter
# plot. The line will show up red.
# Get the min and max total_rooms values.
x_0 = sample["total_rooms"].min()
x_1 = sample["total_rooms"].max()
# Retrieve the final weight and bias generated during training.
weight = linear_regressor.get_variable_value('linear/linear_model/total_rooms/weights')[0]
bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
# Get the predicted median_house_values for the min and max total_rooms values.
y_0 = weight * x_0 + bias
y_1 = weight * x_1 + bias
# Plot our regression line from (x_0, y_0) to (x_1, y_1).
plt.plot([x_0, x_1], [y_0, y_1], c='r')
# Label the graph axes.
plt.ylabel("median_house_value")
plt.xlabel("total_rooms")
# Plot a scatter plot from our data sample.
plt.scatter(sample["total_rooms"], sample["median_house_value"])
# Display graph.
plt.show()
"""
Step 4: Tweak the Model Hyperparameters
"""
# In this function, we'll proceed in 10 evenly divided periods so that we can observe the model improvement at each
# period. For each period, we'll compute and graph the training loss. This may help you judge when a model has
# converged, or whether it needs more iterations. We'll also plot the feature weight and bias term values learned by
# the model over time.
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
"""Trains a linear regression model of one feature.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step consists of a forward and backward
pass using a single batch.
batch_size: A non-zero `int`, the batch size.
input_feature: A `string` specifying a column from `california_housing_dataframe`
to use as input feature.
"""
periods = 10
steps_per_period = steps / periods
my_feature = input_feature
my_feature_data = california_housing_dataframe[[my_feature]]
my_label = "median_house_value"
targets = california_housing_dataframe[my_label]
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column(my_feature)]
# Create input functions.
training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)
prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
# Set up to plot the state of our model's line each period.
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.title("Learned Line by Period")
plt.ylabel(my_label)
plt.xlabel(my_feature)
sample = california_housing_dataframe.sample(n=300)
plt.scatter(sample[my_feature], sample[my_label])
colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]
# Train model, but do so inside a loop so that we can periodically assess loss metrics.
print("Training model...")
print("RMSE (on training data):")
root_mean_squared_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute predictions.
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
predictions = np.array([item['predictions'][0] for item in predictions])
# Compute loss.
root_mean_squared_error = math.sqrt(metrics.mean_squared_error(predictions, targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, root_mean_squared_error))
# Add the loss metrics from this period to our list.
root_mean_squared_errors.append(root_mean_squared_error)
# Finally, track the weights and biases over time.
# Apply some math to ensure that the data and line are plotted neatly.
y_extents = np.array([0, sample[my_label].max()])
weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
x_extents = (y_extents - bias) / weight
x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()), sample[my_feature].min())
y_extents = weight * x_extents + bias
plt.plot(x_extents, y_extents, color=colors[period])
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.subplot(1, 2, 2)
plt.ylabel('RMSE')
plt.xlabel('Periods')
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(root_mean_squared_errors)
# Output a table with calibration data.
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = | pd.Series(targets) | pandas.Series |
import pandas as pd
import os
import json
TOPDIR = "/mnt/c/Users/aotubusen/Documents/DS Projects/future_news_fund/data/raw/opname_csv_gb"
HEADER_PATH = os.path.join(TOPDIR,'DOC','OS_Open_Names_Header.csv')
def extract_tree(df, levels, ilvl=0):
lvl = levels[ilvl]
entities = []
for group, grouped in df.groupby(lvl):
try:
children = extract_tree(grouped, levels, ilvl+1)
except IndexError:
children = grouped[['LOCAL_TYPE','NAME1']].to_dict(orient='records')
children = [{'type': child['LOCAL_TYPE'], 'name':child['NAME1']}
for child in children]
entity = {'type':lvl, 'name':group}
if len(children) > 0:
entity['contains'] = children
entities.append(entity)
return entities
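# For intuition, the nesting produced by extract_tree looks like this (toy values, invented for illustration):
# extract_tree(df, ['COUNTRY', 'REGION', 'COUNTY_UNITARY', 'DISTRICT_BOROUGH', 'POSTCODE_DISTRICT']) returns
# entries such as
# [{'type': 'COUNTRY', 'name': 'England',
#   'contains': [{'type': 'REGION', 'name': 'South East',
#                 'contains': [... nested further until the leaves, e.g. {'type': 'Town', 'name': 'Dover'} ...]}]}]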
header = list(pd.read_csv(HEADER_PATH).columns)
data = []
cols = ['NAME1','LOCAL_TYPE','POSTCODE_DISTRICT','DISTRICT_BOROUGH','COUNTY_UNITARY','REGION','COUNTRY']
for filename in os.listdir(os.path.join(TOPDIR,'DATA')):
path = os.path.join(TOPDIR,'DATA',filename)
df = | pd.read_csv(path, header=None, names=header, low_memory=False) | pandas.read_csv |
import settings
import pandas as pd
from dskc._util import df_to_list_w_column_idx
from src.data_curation.datasets import pledges_clean
import numpy as np
def pledges_merge(df,pledges_path):
df_pledges = pd.read_csv(pledges_path,index_col=None)
df_pledges = pledges_clean(df_pledges)
df = pd.merge(df, df_pledges,
suffixes=('_Projects', '_Pledges'),
left_on='PID',
right_on='PID',
how='left',
validate="1:m")
df.sort_values(by=['PID', 'PAYDATE'], inplace=True)
return feature_creation(df)
def _set_rewards(data, c_idx, rewards, amount, index, last_is_same_prj):
# for each reward
for i, r in enumerate(rewards):
if amount >= r:
# can be the next reward
if i + 1 < len(rewards) and amount >= rewards[i + 1]:
continue
# add reward
if last_is_same_prj:
n_pledges = data[index - 1][c_idx["REWARD_SLOT_{}_N_PLEDGES".format(i + 1)]] + 1
else:
n_pledges = 1
# set reward
data[index][c_idx["REWARD_SLOT_{}_N_PLEDGES".format(i + 1)]] = n_pledges
break
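# Worked example (values invented): with rewards = [10, 25, 50] and a pledge AMOUNT of 30, the loop skips slot 1
# (because 30 also clears the 25 threshold), stops at slot 2 (30 >= 25 but 30 < 50), and sets
# REWARD_SLOT_2_N_PLEDGES to the previous row's count + 1 when that row belongs to the same project, or to 1 otherwise.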
def _set_percentage_self_funded(data, c_idx, index, last_is_same_prj):
# if pledge has same UID that project
if data[index][c_idx["UID_Projects"]] == data[index][c_idx["UID_Pledges"]]:
amount_self_funded = 0
# sum the last self funded
if last_is_same_prj:
amount_self_funded += data[index - 1][c_idx["AMOUNT_SELF_FUNDED"]]
amount_self_funded += data[index][c_idx["AMOUNT"]]
# set amount
data[index][c_idx["AMOUNT_SELF_FUNDED"]] = amount_self_funded
def feature_creation(df):
# set new features default
df['RAISED'] = 0
df["BACKERS"] = 0
df['DAYS_ELAPSED'] = 0
df["AMOUNT_SELF_FUNDED"] = 0
data, c_idx, columns = df_to_list_w_column_idx(df)
rewards = []
n_backers = 0
raised = 0
# for each pledge
for i in range(len(data)):
amount = data[i][c_idx['AMOUNT']]
# project without pledges
if np.isnan(amount):
continue
if i == 0 or data[i][c_idx['PID']] != data[i - 1][c_idx['PID']]: # new project
# set rewards
rewards = []
n_rewards = data[i][c_idx["N_REWARDS"]]
for j in range(n_rewards):
rewards.append(data[i][c_idx["REWARD_SLOT_{}_AMOUNT".format(j + 1)]])
raised = amount
n_backers = 1
last_is_same_prj = False
else:
raised += amount
n_backers += 1
last_is_same_prj = True
_set_percentage_self_funded(data, c_idx, i, last_is_same_prj)
#_set_rewards(data, c_idx, rewards, amount, i, last_is_same_prj)
# set calculated variables
data[i][c_idx["BACKERS"]] = n_backers
data[i][c_idx['RAISED']] = raised
days = int((data[i][c_idx['PAYDATE']] - data[i][c_idx['START']]) / 86400) # divide all seconds in a day
data[i][c_idx['DAYS_ELAPSED']] = days
return | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
"""
---------------------------------------------------
----------------- MODULE: Trainer -----------------
---------------------------------------------------
This must-use module has excellent classes and
methods for training and evaluating Machine
Learning models end to end with just a few lines
of code. It really encapsulates almost all the
hard work of data scientists and data analysts by
presenting classes with methods for training,
optimizing model hyperparameters, generating
performance reports, plotting feature importances,
confusion matrices, ROC curves and much more!
Table of Contents
---------------------------------------------------
1. Initial setup
1.1 Importing libraries
1.2 Setting up logging
1.3 Functions for formatting charts
1.4 Functions for saving objects
2. Classification
2.1 Binary classification
2.2 Multiclass classification
3. Regression
2.1 Linear regression
---------------------------------------------------
"""
# Author: <NAME>
# Date: 13/04/2021
"""
---------------------------------------------------
---------------- 1. INITIAL SETUP -----------------
1.1 Importing libraries
---------------------------------------------------
"""
# Standard libraries
import pandas as pd
import numpy as np
import time
from datetime import datetime
import itertools
from math import ceil
import os
from os import makedirs, getcwd
from os.path import isdir, join
# Machine Learning
import joblib
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score, cross_val_predict, learning_curve
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, \
f1_score, confusion_matrix, roc_curve, mean_absolute_error, \
mean_squared_error, r2_score
from sklearn.metrics import classification_report
from sklearn.exceptions import NotFittedError
import shap
# Viz
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import Patch
from matplotlib.axes import Axes
import seaborn as sns
# For AnnotateBars class
from dataclasses import dataclass
from typing import *
# Logging
import logging
"""
---------------------------------------------------
---------------- 1. INITIAL SETUP -----------------
1.2 Setting up logging
---------------------------------------------------
"""
# Function for a useful log configuration
def log_config(logger, level=logging.DEBUG,
log_format='%(levelname)s;%(asctime)s;%(filename)s;%(module)s;%(lineno)d;%(message)s',
log_filepath=os.path.join(os.getcwd(), 'exec_log/execution_log.log'),
flag_file_handler=False, flag_stream_handler=True, filemode='a'):
"""
Uses a logging object for applying basic configuration on it
Parameters
----------
:param logger: logger object created on module scope [type: logging.getLogger()]
:param level: logger object level [type: level, default=logging.DEBUG]
:param log_format: logger format to be stored [type: string]
:param log_filepath: path where .log file will be stored [type: string, default='exec_log/execution_log.log']
:param flag_file_handler: flag for saving .log file [type: bool, default=False]
:param flag_stream_handler: flag for log verbosity on cmd [type: bool, default=True]
:param filemode: write type on the log file [type: string, default='a' (append)]
Return
------
:return logger: pre-configured logger object
"""
# Setting level for the logger object
logger.setLevel(level)
# Creating a formatter
formatter = logging.Formatter(log_format, datefmt='%Y-%m-%d %H:%M:%S')
# Creating handlers
if flag_file_handler:
log_path = '/'.join(log_filepath.split('/')[:-1])
if not isdir(log_path):
makedirs(log_path)
# Adding file_handler
file_handler = logging.FileHandler(log_filepath, mode=filemode, encoding='utf-8')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if flag_stream_handler:
# Adding stream_handler
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
# Setting up logger object
logger = logging.getLogger(__file__)
logger = log_config(logger)
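# Minimal sketch of an alternative logging setup, assuming the caller also wants
# the .log file persisted on disk (logger name and file path below are placeholders):
#
# job_logger = logging.getLogger('my_training_job')
# job_logger = log_config(job_logger, level=logging.INFO, flag_file_handler=True,
#                         log_filepath=os.path.join(os.getcwd(), 'exec_log/train.log'))
# job_logger.info('Logger configured with stream and file handlers')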
"""
---------------------------------------------------
---------------- 1. INITIAL SETUP -----------------
1.3 Functions for formatting charts
---------------------------------------------------
"""
# Formatting spines in a matplotlib plot
def format_spines(ax, right_border=False):
"""
Modify borders and axis colors of matplotlib figures
Parameters
----------
:param ax: figure axis created using matplotlib [type: matplotlib.pyplot.axes]
:param right_border: boolean flag for hiding right border [type: bool, default=False]
Return
------
This function has no return besides the customization of the matplotlib axis
Example
-------
fig, ax = plt.subplots()
format_spines(ax=ax, right_border=False)
"""
# Setting colors on the axis
ax.spines['bottom'].set_color('#CCCCCC')
ax.spines['left'].set_color('#CCCCCC')
ax.spines['top'].set_visible(False)
# Right border formatting
if right_border:
ax.spines['right'].set_color('#CCCCCC')
else:
ax.spines['right'].set_color('#FFFFFF')
ax.patch.set_facecolor('#FFFFFF')
# Reference: https://towardsdatascience.com/annotating-bar-charts-and-other-matplolib-techniques-cecb54315015
# Creating aliases
#Patch = matplotlib.patches.Patch
PosVal = Tuple[float, Tuple[float, float]]
#Axis = matplotlib.axes.Axes
Axis = Axes
PosValFunc = Callable[[Patch], PosVal]
@dataclass
class AnnotateBars:
font_size: int = 10
color: str = "black"
n_dec: int = 2
def horizontal(self, ax: Axis, centered=False):
def get_vals(p: Patch) -> PosVal:
value = p.get_width()
div = 2 if centered else 1
pos = (
p.get_x() + p.get_width() / div,
p.get_y() + p.get_height() / 2,
)
return value, pos
ha = "center" if centered else "left"
self._annotate(ax, get_vals, ha=ha, va="center")
def vertical(self, ax: Axis, centered:bool=False):
def get_vals(p: Patch) -> PosVal:
value = p.get_height()
div = 2 if centered else 1
pos = (p.get_x() + p.get_width() / 2,
p.get_y() + p.get_height() / div
)
return value, pos
va = "center" if centered else "bottom"
self._annotate(ax, get_vals, ha="center", va=va)
def _annotate(self, ax, func: PosValFunc, **kwargs):
cfg = {"color": self.color,
"fontsize": self.font_size, **kwargs}
for p in ax.patches:
value, pos = func(p)
ax.annotate(f"{value:.{self.n_dec}f}", pos, **cfg)
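# Illustrative sketch of AnnotateBars on a simple bar chart (synthetic values,
# not part of the module's execution flow):
#
# fig, ax = plt.subplots()
# ax.bar(['a', 'b', 'c'], [0.25, 0.50, 0.75], color='darkslateblue')
# format_spines(ax, right_border=False)
# AnnotateBars(n_dec=2, color='black', font_size=10).vertical(ax)
# plt.show()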
"""
---------------------------------------------------
---------------- 1. INITIAL SETUP -----------------
1.4 Functions for saving objects
---------------------------------------------------
"""
# Saving DataFrames on csv format
def save_data(data, output_path, filename):
"""
Saves DataFrame objects in csv format.
Parameters
----------
:param data: data to be saved [type: pd.DataFrame]
:param output_path: path reference for the file [type: string]
:param filename: filename for the file with .csv extension [type: string]
Return
------
There is no return besides the file saved on local machine
Application
-----------
df = file_generator_method()
save_data(df, output_path=OUTPUT_PATH, filename='file.csv')
"""
# Searching if directory exists
if not os.path.isdir(output_path):
logger.warning(f'Path {output_path} does not exist. Creating directory on the given path')
try:
os.makedirs(output_path)
except Exception as e:
logger.error(f'Error on trying to create directory {output_path}. Exception: {e}')
return
logger.debug(f'Saving file on directory')
try:
output_file = os.path.join(output_path, filename)
data.to_csv(output_file, index=False)
except Exception as e:
logger.error(f'Error on saving file {filename}. Exception: {e}')
# Saving model in pkl format
def save_model(model, output_path, filename):
"""
Saves trained model and pipelines on pkl format
Parameter
---------
:param model: model object to be saved [type: model]
:param output_path: path reference for the file [type: string]
:param filename: filename for the file with .pkl extension [type: string]
Return
------
There is no return besides the object saved on local machine
Application
-----------
model = classifiers['estimator']
save_model(model, output_path=OUTPUT_PATH, filename='model.pkl')
"""
# Searching if directory exists
if not os.path.isdir(output_path):
logger.warning(f'Path {output_path} does not exist. Creating directory on the given path')
try:
os.makedirs(output_path)
except Exception as e:
logger.error(f'Error on trying to create directory {output_path}. Exception: {e}')
return
logger.debug(f'Saving pkl file on directory')
try:
output_file = os.path.join(output_path, filename)
joblib.dump(model, output_file)
except Exception as e:
logger.error(f'Error on saving model {filename}. Exception: {e}')
# Saving figures generated from matplotlib
def save_fig(fig, output_path, img_name, tight_layout=True, dpi=300):
"""
Saves figures created from matplotlib/seaborn
Parameters
----------
:param fig: figure created using matplotlib [type: plt.figure]
:param output_path: directory where the image will be saved [type: string]
:param img_name: filename for the image with .png extension [type: string]
:param tight_layout: flag for tighting figure layout before saving it [type: bool, default=True]
:param dpi: image resolution [type: int, default=300]
Return
------
This function returns nothing besides the image saved on the given path
Application
---------
fig, ax = plt.subplots()
save_fig(fig, output_path='output/imgs', img_name='image.png')
"""
# Searching for the existence of the directory
if not os.path.isdir(output_path):
logger.warning(f'Directory {output_path} does not exist. Creating a directory on the given path')
try:
os.makedirs(output_path)
except Exception as e:
logger.error(f'Error on creating the directory {output_path}. Exception: {e}')
return
# Tightening layout
if tight_layout:
fig.tight_layout()
logger.debug('Saving image on directory')
try:
output_file = os.path.join(output_path, img_name)
fig.savefig(output_file, dpi=dpi)
logger.info(f'Image successfully saved in {output_file}')
except Exception as e:
logger.error(f'Error on saving image. Exception: {e}')
return
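# Hedged usage sketch for the three saving helpers defined above (paths, names
# and the objects being saved are placeholders, not project defaults):
#
# OUTPUT_PATH = os.path.join(os.getcwd(), 'output')
# save_data(metrics_df, output_path=os.path.join(OUTPUT_PATH, 'metrics'), filename='metrics.csv')
# save_model(trained_model, output_path=os.path.join(OUTPUT_PATH, 'models'), filename='model.pkl')
# save_fig(fig, output_path=os.path.join(OUTPUT_PATH, 'imgs'), img_name='chart.png')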
"""
---------------------------------------------------
---------------- 2. CLASSIFICATION ----------------
2.1 Binary Classification
---------------------------------------------------
"""
class BinaryClassifier:
"""
Trains and evaluate binary classification models.
The methods of this class enable a complete management of
binary classification tasks in every step of the development
workflow
"""
def __init__(self):
self.classifiers_info = {}
def fit(self, set_classifiers, X_train, y_train, **kwargs):
"""
Trains each classifier in set_classifiers dictionary through a defined setup
Parameters
----------
:param set_classifiers: contains the setup for training the models [type: dict]
set_classifiers = {
'model_name': {
'model': __estimator__,
'params': __estimator_params__
}
}
:param X_train: features for training data [type: np.array]
:param y_train: target array for training data [type: np.array]
:param **kwargs: additional parameters
:arg approach: suffix string for identifying a different approach for models training [type: string, default='']
:arg random_search: boolean flag for applying RandomizedSearchCV on training [type: bool, default=False]
:arg scoring: optimization metric for RandomizedSearchCV (if applicable) [type: string, default='accuracy']
:arg cv: K-folds used on cross validation evaluation on RandomizedSearchCV [type: int, default=5]
:arg verbose: verbosity configured on hyperparameters search [type: int, default=-1]
:arg n_jobs: CPUs vcores to be used during hyperparameters search [type: int, default=1]
:arg save: flag for saving pkl/joblib files for trained models on local disk [type: bool, default=True]
:arg output_path: folder path for pkl/joblib files to be saved [type: string, default=cwd() + 'output/models']
:arg model_ext: extension for model files (pkl or joblib) without point "." [type: string, default='pkl']
Return
------
This method doesn't return anything but the set of self.classifiers_info class attribute with useful info
Application
-----------
# Initializing object and training models
trainer = BinaryClassifier()
trainer.fit(set_classifiers, X_train_prep, y_train)
"""
# Extracting approach from kwargs dictionary
approach = kwargs['approach'] if 'approach' in kwargs else ''
# Iterating over each model on set_classifiers dictionary
try:
for model_name, model_info in set_classifiers.items():
# Defining a custom key for the further classifiers_info class attribute dictionary
model_key = model_name + approach
logger.debug(f'Training model {model_key}')
model = model_info['model']
# Creating an empty dictionary for storing model's info
self.classifiers_info[model_key] = {}
# Validating the application of random search for hyperparameter tunning
try:
if 'random_search' in kwargs and bool(kwargs['random_search']):
params = model_info['params']
# Returning additional parameters from kwargs dictionary
scoring = kwargs['scoring'] if 'scoring' in kwargs else 'accuracy'
cv = kwargs['cv'] if 'cv' in kwargs else 5
verbose = kwargs['verbose'] if 'verbose' in kwargs else -1
n_jobs = kwargs['n_jobs'] if 'n_jobs' in kwargs else 1
# Preparing and applying search
rnd_search = RandomizedSearchCV(model, params, scoring=scoring, cv=cv,
verbose=verbose, random_state=42, n_jobs=n_jobs)
logger.debug('Applying RandomizedSearchCV')
rnd_search.fit(X_train, y_train)
# Saving the best model on classifiers_info class dictionary
self.classifiers_info[model_key]['estimator'] = rnd_search.best_estimator_
else:
# Training model without searching for best hyperparameters
self.classifiers_info[model_key]['estimator'] = model.fit(X_train, y_train)
except TypeError as te:
logger.error(f'Error when trying RandomizedSearch. Exception: {te}')
return
# Saving pkl files if applicable
if 'save' in kwargs and bool(kwargs['save']):
model_ext = kwargs['model_ext'] if 'model_ext' in kwargs else 'pkl'
logger.debug(f'Saving model file for {model_name} on {model_ext} format')
model = self.classifiers_info[model_key]['estimator']
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/models')
anomesdia = datetime.now().strftime('%Y%m%d')
filename = model_name.lower() + '_' + anomesdia + '.' + model_ext
save_model(model, output_path=output_path, filename=filename)
except AttributeError as ae:
logger.error(f'Error when training models. Exception: {ae}')
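# Hypothetical example of the set_classifiers dictionary expected by fit()
# (the estimators and hyperparameter grids below are illustrative choices only):
#
# from sklearn.linear_model import LogisticRegression
# from sklearn.ensemble import RandomForestClassifier
# set_classifiers = {
#     'LogisticRegression': {
#         'model': LogisticRegression(max_iter=500),
#         'params': {'C': [0.01, 0.1, 1.0, 10.0]}
#     },
#     'RandomForest': {
#         'model': RandomForestClassifier(random_state=42),
#         'params': {'n_estimators': [100, 300], 'max_depth': [3, 5, None]}
#     }
# }
# trainer = BinaryClassifier()
# trainer.fit(set_classifiers, X_train, y_train, random_search=True, scoring='roc_auc', cv=5)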
def compute_train_performance(self, model_name, estimator, X, y, cv=5):
"""
Applies cross validation for returning the main binary classification metrics for trained models.
In practice, this method is usually applied on a top layer of the class, or in other words, it is usually
executed by another method for extracting metrics on training and validating data
Parameters
----------
:param model_name: model key on self.classifiers_info class attribute [type: string]
:param estimator: model estimator to be evaluated [type: object]
:param X: model features for training data [type: np.array]
:param y: target array for training data [type: np.array]
:param cv: K-folds used on cross validation step [type: int, default=5]
Return
------
:return train_performance: dataset with metrics computed using cross validation on training set [type: pd.DataFrame]
Application
-----------
# Initializing and training models
trainer = BinaryClassifier()
trainer.fit(model, X_train, y_train)
train_performance = trainer.compute_train_performance(model_name, estimator, X_train, y_train)
"""
# Computing metrics using cross validation
logger.debug(f'Computing metrics on {model_name} using cross validation with {cv} K-folds')
try:
t0 = time.time()
accuracy = cross_val_score(estimator, X, y, cv=cv, scoring='accuracy').mean()
precision = cross_val_score(estimator, X, y, cv=cv, scoring='precision').mean()
recall = cross_val_score(estimator, X, y, cv=cv, scoring='recall').mean()
f1 = cross_val_score(estimator, X, y, cv=cv, scoring='f1').mean()
# Computing probabilities for AUC metrics
try:
y_scores = cross_val_predict(estimator, X, y, cv=cv, method='decision_function')
except:
# Tree models don't have decision_function() method but predict_proba()
y_probas = cross_val_predict(estimator, X, y, cv=cv, method='predict_proba')
y_scores = y_probas[:, 1]
auc = roc_auc_score(y, y_scores)
# Saving metrics on self.classifiers_info class attribute
self.classifiers_info[model_name]['train_scores'] = y_scores
# Creating a DataFrame with performance result
t1 = time.time()
delta_time = t1 - t0
train_performance = {}
train_performance['model'] = model_name
train_performance['approach'] = f'Train {cv} K-folds'
train_performance['acc'] = round(accuracy, 4)
train_performance['precision'] = round(precision, 4)
train_performance['recall'] = round(recall, 4)
train_performance['f1'] = round(f1, 4)
train_performance['auc'] = round(auc, 4)
train_performance['total_time'] = round(delta_time, 3)
logger.info(f'Successfully computed metrics on training data in {round(delta_time, 3)} seconds')
return pd.DataFrame(train_performance, index=[0])
except Exception as e:
logger.error(f'Error on computing metrics. Exception: {e}')
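# Quick illustration of why the predict_proba fallback above is needed (sketch
# only; rf_model and logit_model are placeholder estimators): linear models such
# as LogisticRegression expose decision_function(), while tree ensembles such as
# RandomForestClassifier only expose predict_proba(), so the AUC input is taken
# from whichever score type is available.
#
# scores = cross_val_predict(logit_model, X, y, cv=5, method='decision_function')
# probas = cross_val_predict(rf_model, X, y, cv=5, method='predict_proba')[:, 1]
# roc_auc_score(y, scores); roc_auc_score(y, probas)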
def compute_val_performance(self, model_name, estimator, X, y):
"""
Computes metrics on validation datasets for binary classifiers.
In practice, this method is usually applied on a top layer of the class, or in other words, it is usually
executed by another method for extracting metrics on training and validating data
Parameters
----------
:param model_name: model key on self.classifiers_info class attribute [type: string]
:param estimator: model estimator to be evaluated [type: object]
:param X: model features for validation data [type: np.array]
:param y: target array for validation data [type: np.array]
Return
------
:return val_performance: dataset with metrics computed on validation set [type: pd.DataFrame]
Application
-----------
# Initializing and training models
trainer = BinaryClassifier()
trainer.fit(model, X_train, y_train)
val_performance = trainer.compute_val_performance(model_name, estimator, X_val, y_val)
"""
# Computing metrics
logger.debug(f'Computing metrics on {model_name} using validation data')
try:
t0 = time.time()
y_pred = estimator.predict(X)
y_proba = estimator.predict_proba(X)
y_scores = y_proba[:, 1]
# Retrieving metrics using validation data
accuracy = accuracy_score(y, y_pred)
precision = precision_score(y, y_pred)
recall = recall_score(y, y_pred)
f1 = f1_score(y, y_pred)
auc = roc_auc_score(y, y_scores)
# Saving probabilities on trained classifiers dictionary
self.classifiers_info[model_name]['val_scores'] = y_scores
# Creating a DataFrame with metrics
t1 = time.time()
delta_time = t1 - t0
test_performance = {}
test_performance['model'] = model_name
test_performance['approach'] = f'Validation'
test_performance['acc'] = round(accuracy, 4)
test_performance['precision'] = round(precision, 4)
test_performance['recall'] = round(recall, 4)
test_performance['f1'] = round(f1, 4)
test_performance['auc'] = round(auc, 4)
test_performance['total_time'] = round(delta_time, 3)
logger.info(f'Successfully computed metrics using validation data for {model_name} in {round(delta_time, 3)} seconds')
return pd.DataFrame(test_performance, index=[0])
except Exception as e:
logger.error(f'Error on computing metrics. Exception: {e}')
def evaluate_performance(self, X_train, y_train, X_val, y_val, cv=5, **kwargs):
"""
Computes classification metrics for training and validation data
Parameters
----------
:param X_train: model features for training data [type: np.array]
:param y_train: target array for training data [type: np.array]
:param X_val: model features for validation data [type: np.array]
:param y_val: target array for validation data [type: np.array]
:param cv: K-folds used on cross validation step [type: int, default=5]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/metrics']
:arg output_filename: name of csv file to be saved [type: string, default='metrics.csv']
Return
------
:return df_performances: dataset with metrics obtained using training and validation data [type: pd.DataFrame]
Application
-----------
# Training model and evaluating performance on training and validation sets
trainer = BinaryClassifier()
trainer.fit(estimator, X_train, y_train)
# Generating a performance dataset
df_performance = trainer.evaluate_performance(X_train, y_train, X_val, y_val)
"""
# Creating an empty DataFrame for storing metrics
df_performances = pd.DataFrame({})
# Iterating over the trained classifiers on the class attribute dictionary
for model_name, model_info in self.classifiers_info.items():
# Verifying if the model was already trained (model_info dict will have the key 'train_performance')
if 'train_performance' in model_info.keys():
df_performances = df_performances.append(model_info['train_performance'])
df_performances = df_performances.append(model_info['val_performance'])
continue
# Returning the model to be evaluated
try:
estimator = model_info['estimator']
except KeyError as e:
logger.error(f'Error on returning the key "estimator" from model_info dict. Model {model_name} was not trained')
continue
# Computing performance on training and validation sets
train_performance = self.compute_train_performance(model_name, estimator, X_train, y_train, cv=cv)
val_performance = self.compute_val_performance(model_name, estimator, X_val, y_val)
# Setting up results on classifiers_info class dict
self.classifiers_info[model_name]['train_performance'] = train_performance
self.classifiers_info[model_name]['val_performance'] = val_performance
# Building a DataFrame with model metrics
model_performance = train_performance.append(val_performance)
df_performances = df_performances.append(model_performance)
df_performances['anomesdia_datetime'] = datetime.now()
# Saving some attributes on classifiers_info for maybe retrieving in the future
model_data = {
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val
}
model_info['model_data'] = model_data
# Saving results if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/metrics')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'metrics.csv'
save_data(df_performances, output_path=output_path, filename=output_filename)
return df_performances.reset_index(drop=True)
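# Hedged example of calling evaluate_performance() after fit(); the column set
# mirrors the dictionaries built above and the filename is a placeholder:
#
# df_performance = trainer.evaluate_performance(X_train, y_train, X_val, y_val, cv=5,
#                                               save=True, output_filename='metrics.csv')
# df_performance[['model', 'approach', 'acc', 'precision', 'recall', 'f1', 'auc']]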
def feature_importance(self, features, top_n=-1, **kwargs):
"""
Extracts the feature importance method from trained models
Parameters
----------
:param features: list of features considered on training step [type: list]
:param top_n: parameter for filtering only the top N most important features [type: int, default=-1]
*note: when this parameter is equal to -1, all features are considered
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/metrics']
:arg output_filename: name of csv file to be saved [type: string, default='top_features.csv']
Return
------
:return all_feat_imp: pandas DataFrame with the feature importance analysis of the models [type: pd.DataFrame]
Application
-----------
# Training models
trainer = BinaryClassifier()
trainer.fit(estimator, X_train, y_train)
# Returning a feature importance dataset for all models at once
feat_imp = trainer.feature_importance(features=MODEL_FEATURES, top_n=20)
"""
# Creating an empty DataFrame for storing feature importance analysis
feat_imp = pd.DataFrame({})
all_feat_imp = pd.DataFrame({})
# Iterating over models in the class
for model_name, model_info in self.classifiers_info.items():
# Extracting feature importance from models
logger.debug(f'Extracting feature importances from the model {model_name}')
try:
importances = model_info['estimator'].feature_importances_
except KeyError as ke:
logger.warning(f'Model {model_name} was not trained yet, so it is impossible to use the feature_importances_ attribute')
continue
except AttributeError as ae:
logger.warning(f'Model {model_name} does not have the feature_importances_ attribute')
continue
# Preparing dataset for storing the info
feat_imp['feature'] = features
feat_imp['importance'] = importances
feat_imp['model'] = model_name
feat_imp['anomesdia_datetime'] = datetime.now()
feat_imp.sort_values(by='importance', ascending=False, inplace=True)
feat_imp = feat_imp.loc[:, ['model', 'feature', 'importance', 'anomesdia_datetime']]
# Saving feature importance info on class attribute dictionary classifiers_info
self.classifiers_info[model_name]['feature_importances'] = feat_imp
all_feat_imp = all_feat_imp.append(feat_imp)
logger.info(f'Feature importances extracted successfully for the model {model_name}')
# Saving results if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/metrics')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'top_features.csv'
save_data(all_feat_imp, output_path=output_path, filename=output_filename)
return all_feat_imp
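# Illustrative call for the feature importance extraction (MODEL_FEATURES is a
# placeholder list with the column names used on training):
#
# feat_imp = trainer.feature_importance(features=MODEL_FEATURES, top_n=20,
#                                       save=True, output_filename='top_features.csv')
# feat_imp.query('model == "RandomForest"').head(10)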
def training_flow(self, set_classifiers, X_train, y_train, X_val, y_val, features, **kwargs):
"""
This method consolidates all the steps needed for training, evaluating and extracting useful
information for machine learning models given specific input arguments. When executed, this
method sequentially applies the fit(), evaluate_performance() and feature_importance() methods
of this class, saving results if applicable.
This is a good choice for doing everything at once. The tradeoff is that it is important to
pass the full set of parameters needed by the individual methods.
Parameters
----------
:param set_classifiers: contains the setup for training the models [type: dict]
set_classifiers = {
'model_name': {
'model': __estimator__,
'params': __estimator_params__
}
}
:param X_train: features for training data [type: np.array]
:param y_train: target array for training data [type: np.array]
:param X_val: model features for validation data [type: np.array]
:param y_val: target array for validation data [type: np.array]
:param features: list of features considered on training step [type: list]
:param **kwargs: additional parameters
:arg approach: suffix string for identifying a different approach for models training [type: string, default='']
:arg random_search: boolean flag for applying RandomizedSearchCV on training [type: bool, default=False]
:arg scoring: optimization metric for RandomizedSearchCV (if applicable) [type: string, default='accuracy']
:arg cv: K-folds used on cross validation evaluation [type: int, default=5]
:arg verbose: verbosity configured on hyperparameters search [type: int, default=-1]
:arg n_jobs: CPUs vcores to be used during hyperparameters search [type: int, default=1]
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg models_output_path: path for saving model pkl files [type: string, default=cwd() + 'output/models']
:arg metrics_output_path: path for saving performance metrics dataset [type: string, default=cwd() + 'output/metrics']
:arg metrics_output_filename: filename for metrics dataset csv file to be saved [type: string, default='metrics.csv']
:arg featimp_output_filename: filename for feature importance csv file to be saved [type: string, default='top_features.csv']
:arg top_n_featimp: parameter for filtering only the top N most important features [type: int, default=-1]
*note: when this parameter is equal to -1, all features are considered
Return
------
This method doesn't return anything; it just runs the complete training and evaluation flow
Application
-----------
# Initializing object and executing training steps through the method
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
"""
# Extracting additional parameters from kwargs dictionary
approach = kwargs['approach'] if 'approach' in kwargs else ''
random_search = kwargs['random_search'] if 'random_search' in kwargs else False
scoring = kwargs['scoring'] if 'scoring' in kwargs else 'accuracy'
cv = kwargs['cv'] if 'cv' in kwargs else 5
verbose = kwargs['verbose'] if 'verbose' in kwargs else -1
n_jobs = kwargs['n_jobs'] if 'n_jobs' in kwargs else 1
save = bool(kwargs['save']) if 'save' in kwargs else True
models_output_path = kwargs['models_output_path'] if 'models_output_path' in kwargs else os.path.join(os.getcwd(), 'output/models')
metrics_output_path = kwargs['metrics_output_path'] if 'metrics_output_path' in kwargs else os.path.join(os.getcwd(), 'output/metrics')
metrics_output_filename = kwargs['metrics_output_filename'] if 'metrics_output_filename' in kwargs else 'metrics.csv'
featimp_output_filename = kwargs['featimp_output_filename'] if 'featimp_output_filename' in kwargs else 'top_features.csv'
top_n_featimp = kwargs['top_n_featimp'] if 'top_n_featimp' in kwargs else -1
# Training models
self.fit(set_classifiers, X_train, y_train, approach=approach, random_search=random_search, scoring=scoring,
cv=cv, verbose=verbose, n_jobs=n_jobs, save=save, output_path=models_output_path)
# Evaluating models
self.evaluate_performance(X_train, y_train, X_val, y_val, save=save, output_path=metrics_output_path,
output_filename=metrics_output_filename)
# Extracting feature importance from models
self.feature_importance(features, top_n=top_n_featimp, save=save, output_path=metrics_output_path,
output_filename=featimp_output_filename)
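# End-to-end sketch of the consolidated flow (the set_classifiers dictionary,
# MODEL_FEATURES and the output paths are assumptions following the structure
# documented above):
#
# trainer = BinaryClassifier()
# trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val,
#                       features=MODEL_FEATURES, random_search=True, scoring='roc_auc',
#                       cv=5, models_output_path='output/models',
#                       metrics_output_path='output/metrics')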
def plot_metrics(self, figsize=(16, 10), palette='rainbow', cv=5, **kwargs):
"""
Plots metrics results for all trained models using training and validation data
Parameters
----------
:param figsize: figure size [type: tuple, default=(16, 10)]
:param palette: matplotlib colormap for the chart [type: string, default='rainbow']
:param cv: K-folds used on cross validation evaluation [type: int, default=5]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='metrics_comparison.png']
Return
------
This method doesn't return anything but the custom metrics chart
Application
-----------
# Training models
trainer = BinaryClassifier()
trainer.fit(estimator, X_train, y_train)
# Visualizing performance through a custom chart
trainer.plot_metrics()
"""
# Initializing plot
logger.debug(f'Initializing plot for visual evaluation of classifiers')
metrics = pd.DataFrame()
for model_name, model_info in self.classifiers_info.items():
logger.debug(f'Returning metrics through cross validation for {model_name}')
try:
# Returning classifier variables from classifiers_info class dict attribute
metrics_tmp = pd.DataFrame()
estimator = model_info['estimator']
X_train = model_info['model_data']['X_train']
y_train = model_info['model_data']['y_train']
# Computing metrics using cross validation
accuracy = cross_val_score(estimator, X_train, y_train, cv=cv, scoring='accuracy')
precision = cross_val_score(estimator, X_train, y_train, cv=cv, scoring='precision')
recall = cross_val_score(estimator, X_train, y_train, cv=cv, scoring='recall')
f1 = cross_val_score(estimator, X_train, y_train, cv=cv, scoring='f1')
# Adding up into the empty DataFrame metrics
metrics_tmp['accuracy'] = accuracy
metrics_tmp['precision'] = precision
metrics_tmp['recall'] = recall
metrics_tmp['f1'] = f1
metrics_tmp['model'] = model_name
# Appending metrics for each model
metrics = metrics.append(metrics_tmp)
except Exception as e:
logger.warning(f'Error on returning metrics for {model_name}. Exception: {e}')
continue
logger.debug(f'Transforming metrics DataFrame for applying a visual plot')
try:
# Pivotting metrics (boxplot)
index_cols = ['model']
metrics_cols = ['accuracy', 'precision', 'recall', 'f1']
df_metrics = pd.melt(metrics, id_vars=index_cols, value_vars=metrics_cols)
# Grouping metrics (bars)
metrics_group = df_metrics.groupby(by=['model', 'variable'], as_index=False).mean()
except Exception as e:
logger.error(f'Error on trying to pivot the DataFrame. Exception: {e}')
return
logger.debug(f'Visualizing metrics for trained models')
try:
# Plotting charts
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=figsize)
sns.boxplot(x='variable', y='value', data=df_metrics.sort_values(by='model'), hue='model', ax=axs[0], palette=palette)
sns.barplot(x='variable', y='value', data=metrics_group, hue='model', ax=axs[1], palette=palette, order=metrics_cols)
# Customizing axis
axs[0].set_title(f'Metrics distribution using cross validation on training data with {cv} K-folds', size=14, pad=15)
axs[1].set_title(f'Average of each metric obtained on cross validation', size=14, pad=15)
format_spines(axs[0], right_border=False)
format_spines(axs[1], right_border=False)
axs[1].get_legend().set_visible(False)
AnnotateBars(n_dec=3, color='black', font_size=12).vertical(axs[1])
except Exception as e:
logger.error(f'Error when plotting charts for metrics. Exception: {e}')
return
# Tightening layout
plt.tight_layout()
# Saving figure if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'metrics_comparison.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
def plot_feature_importance(self, features, top_n=20, palette='viridis', **kwargs):
"""
Plots a chart for visualizing features most important for each trained model on the class
Parameters
----------
:param features: list of features considered on training step [type: list]
:param top_n: parameter for filtering only the top N most important features [type: int, default=20]
*note: when this parameter is equal to -1, all features are considered
:param palette: matplotlib colormap for the chart [type: string, default='viridis']
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='feature_importances.png']
Return
------
This method doesn't return anything but the custom chart for the feature importance analysis
Application
-----------
# Training models
trainer = BinaryClassifier()
trainer.fit(estimator, X_train, y_train)
# Visualizing performance through a custom chart
trainer.plot_feature_importance()
"""
# Extracting chart parameters
logger.debug('Initializing feature importance visual analysis for the models')
feat_imp = pd.DataFrame({})
i = 0
ax_del = 0
nrows = len(self.classifiers_info.keys())
fig, axs = plt.subplots(nrows=nrows, figsize=(16, nrows * 6))
sns.set(style='white', palette='muted', color_codes=True)
# Iterating over each trained model on the class
for model_name, model_info in self.classifiers_info.items():
# Seeing if it's possible to extract feature importances
logger.debug(f'Extracting feature importance from {model_name}')
try:
importances = model_info['estimator'].feature_importances_
except:
logger.warning(f'{model_name} does not have feature_importances_ method')
ax_del += 1
continue
# Preparing a dataset for storing information
feat_imp['feature'] = features
feat_imp['importance'] = importances
feat_imp.sort_values(by='importance', ascending=False, inplace=True)
logger.debug(f'Plotting feature importances for {model_name}')
try:
# Using seaborn's barplot for plotting
sns.barplot(x='importance', y='feature', data=feat_imp.iloc[:top_n, :], ax=axs[i], palette=palette)
# Customizing chart
axs[i].set_title(f'Most Important Features for {model_name}', size=14)
format_spines(axs[i], right_border=False)
i += 1
logger.info(f'Successfully plotted feature importance analysis for {model_name}')
except Exception as e:
logger.error(f'Error on generating feature importances chart for {model_name}. Exception: {e}')
continue
# Eliminating additional axis if applicable
if ax_del > 0:
logger.debug('Deleting axis for models without feature_importances_ method')
try:
for i in range(-1, -(ax_del+1), -1):
fig.delaxes(axs[i])
except Exception as e:
logger.error(f'Error on deleting axis. Exception: {e}')
# Tightening layout
plt.tight_layout()
# Saving figure if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'feature_importances.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
def custom_confusion_matrix(self, model_name, y_true, y_pred, classes, cmap, normalize=False):
"""
Plots a custom confusion matrix for a single model. In practice this method is called in a top layer
through another method that iterates over all trained models on the class. This was a good way to
keep the class organized by centralizing all confusion matrix chart modifications in
one specific method
Parameters
----------
:param model_name: model key on self.classifiers_info class attribute [type: string]
:param y_true: target array for source data [type: np.array]
:param y_pred: predictions array generated by a predict method [type: np.array]
:param classes: name for classes to be put on the matrix [type: list]
:param cmap: matplotlib colormap for the matrix chart [type: matplotlib.colormap]
:param normalize: flag for normalizing cells on the matrix [type: bool, default=False]
Return
-------
This method doesn't return anything but the customization of the confusion matrix
Application
-----------
This method is not usually executed by users outside the class.
Please take a look at the self.plot_confusion_matrix() method.
"""
# Returning confusion matrix through sklearn's function
conf_mx = confusion_matrix(y_true, y_pred)
# Normalizing entries by the true class totals if applicable
if normalize:
    conf_mx = conf_mx.astype('float') / conf_mx.sum(axis=1)[:, np.newaxis]
# Plotting the matrix
plt.imshow(conf_mx, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
# Customizing axis
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
# Customizing entries
fmt = '.2f' if normalize else 'd'
thresh = conf_mx.max() / 2.
for i, j in itertools.product(range(conf_mx.shape[0]), range(conf_mx.shape[1])):
plt.text(j, i, format(conf_mx[i, j], fmt),
horizontalalignment='center',
color='white' if conf_mx[i, j] > thresh else 'black')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title(f'{model_name}\nConfusion Matrix', size=12)
def plot_confusion_matrix(self, cmap=plt.cm.Blues, normalize=False, **kwargs):
"""
Iterates over the dictionary of trained models and builds a custom conf matrix for each one
using training and validation data
Parameters
----------
:param cmap: matplotlib colormap for the matrix chart [type: matplotlib.colormap, default=plt.cm.Blues]
:param normalize: flag for normalizing cells on the matrix [type: bool, default=False]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='confusion_matrix.png']
Return
------
This method doesn't return anything but the plot of custom confusion matrices for trained models
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_confusion_matrix(output_path=OUTPUT_PATH)
"""
# Setting up parameters
logger.debug('Initializing confusion matrix plotting for the models')
k = 1
nrows = len(self.classifiers_info.keys())
fig = plt.figure(figsize=(10, nrows * 4))
sns.set(style='white', palette='muted', color_codes=True)
# Iterating over each trained model on classifiers_info class attribute
for model_name, model_info in self.classifiers_info.items():
logger.debug(f'Returning training and validation data for {model_name}')
try:
# Returning data for the model
X_train = model_info['model_data']['X_train']
y_train = model_info['model_data']['y_train']
X_val = model_info['model_data']['X_val']
y_val = model_info['model_data']['y_val']
classes = np.unique(y_train)
except Exception as e:
logger.error(f'Error when returning data already saved for {model_name}. Exception: {e}')
continue
# Making predictions for training (cross validation) and validation data
logger.debug(f'Making predictions on training and validation data for {model_name}')
try:
train_pred = cross_val_predict(model_info['estimator'], X_train, y_train, cv=5)
val_pred = model_info['estimator'].predict(X_val)
except Exception as e:
logger.error(f'Error on making predictions for {model_name}. Exception: {e}')
continue
logger.debug(f'Creating a confusion matrix for {model_name}')
try:
# Plotting the matrix using training data
plt.subplot(nrows, 2, k)
self.custom_confusion_matrix(model_name + ' Train', y_train, train_pred, classes=classes,
cmap=cmap, normalize=normalize)
k += 1
# Plotting the matrix using validation data
plt.subplot(nrows, 2, k)
self.custom_confusion_matrix(model_name + ' Validation', y_val, val_pred, classes=classes,
cmap=plt.cm.Greens, normalize=normalize)
k += 1
logger.info(f'Confusion matrix successfully plotted for {model_name}')
except Exception as e:
logger.error(f'Error when generating confusion matrix for {model_name}. Exception: {e}')
continue
# Tightening layout
plt.tight_layout()
# Saving image if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'confusion_matrix.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
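# Illustrative call for the confusion matrix plot (Blues is used for the training
# panels while Greens is fixed for validation as coded above; output_path is a placeholder):
#
# trainer.plot_confusion_matrix(cmap=plt.cm.Blues, normalize=True,
#                               save=True, output_path='output/imgs')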
def plot_roc_curve(self, figsize=(16, 6), **kwargs):
"""
Plots a custom ROC Curve for each trained model on dictionary class attribute
for training and validation data
Parameters
----------
:param figsize: figure size [type: tuple, default=(16, 6)]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='roc_curve.png']
Return
------
This method doesn't return anything but a custom chart for the ROC Curve
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_roc_curve(output_path=OUTPUT_PATH)
"""
# Creating figure
logger.debug('Initializing a ROC Curve analysis for trained models')
fig, axs = plt.subplots(ncols=2, figsize=figsize)
# Iterating over trained models on class attribute
for model_name, model_info in self.classifiers_info.items():
logger.debug(f'Returning labels and training and validation scores for {model_name}')
try:
# Returning labels for training and validation
y_train = model_info['model_data']['y_train']
y_val = model_info['model_data']['y_val']
# Returning scores already computed on performance evaluation method
train_scores = model_info['train_scores']
val_scores = model_info['val_scores']
except Exception as e:
logger.error(f'Error on returning parameters for {model_name}. Exception: {e}')
continue
logger.debug(f'Computing FPR, TPR and AUC on training and validation for {model_name}')
try:
# Computing false positive rate and true positive rate
train_fpr, train_tpr, train_thresholds = roc_curve(y_train, train_scores)
test_fpr, test_tpr, test_thresholds = roc_curve(y_val, val_scores)
# Returning AUC already computed on performance evaluation method
train_auc = model_info['train_performance']['auc'].values[0]
test_auc = model_info['val_performance']['auc'].values[0]
except Exception as e:
logger.error(f'Error when computing parameters for {model_name}. Exception: {e}')
continue
logger.debug(f'Plotting the ROC Curves for {model_name}')
try:
# Plotting ROC Curve (training)
plt.subplot(1, 2, 1)
plt.plot(train_fpr, train_tpr, linewidth=2, label=f'{model_name} auc={train_auc}')
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([-0.02, 1.02, -0.02, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'ROC Curve - Train Data')
plt.legend()
# Plotting ROC Curve (validation)
plt.subplot(1, 2, 2)
plt.plot(test_fpr, test_tpr, linewidth=2, label=f'{model_name} auc={test_auc}')
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([-0.02, 1.02, -0.02, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'ROC Curve - Validation Data', size=12)
plt.legend()
except Exception as e:
logger.error(f'Error on plotting ROC Curve for {model_name}. Exception: {e}')
continue
# Tightening layout
plt.tight_layout()
# Saving image if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'roc_curve.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
def plot_score_distribution(self, shade=True, **kwargs):
"""
Plots useful charts for analysing the score distribution of a model through a kdeplot.
When executed, this method builds up two charts: one for training and another for validation
where each one is given by two curves for each target class
Parameters
----------
:param shade: flag for filling down the area under the distribution curve [type: bool, default=True]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='score_distribution.png']
Return
------
This method doesn't return anything but the score distribution plot
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_score_distribution(output_path=OUTPUT_PATH)
"""
# Creating figure
logger.debug('Initializing distribution score analysis for the models')
i = 0
nrows = len(self.classifiers_info.keys())
fig, axs = plt.subplots(nrows=nrows, ncols=2, figsize=(16, nrows * 4))
sns.set(style='white', palette='muted', color_codes=True)
# Iterating over trained classifiers on the class attribute
for model_name, model_info in self.classifiers_info.items():
logger.debug(f'Returning labels and training and validation scores for {model_name}')
try:
# Returning training and validation target labels
y_train = model_info['model_data']['y_train']
y_val = model_info['model_data']['y_val']
# Returning scores that were already computed on evaluate_performance() method
train_scores = model_info['train_scores']
test_scores = model_info['val_scores']
except Exception as e:
logger.error(f'Error on returning parameters for {model_name}. Exception: {e}')
continue
logger.debug(f'Plotting the score distribution chart for {model_name}')
try:
# Building distribution chart for training data
sns.kdeplot(train_scores[y_train == 1], ax=axs[i, 0], label='y=1', shade=shade, color='crimson')
sns.kdeplot(train_scores[y_train == 0], ax=axs[i, 0], label='y=0', shade=shade, color='darkslateblue')
axs[i, 0].set_title(f'Score Distribution for {model_name} - Training')
axs[i, 0].legend()
axs[i, 0].set_xlabel('Score')
format_spines(axs[i, 0], right_border=False)
# Building distribution chart for validation data
sns.kdeplot(test_scores[y_val == 1], ax=axs[i, 1], label='y=1', shade=shade, color='crimson')
sns.kdeplot(test_scores[y_val == 0], ax=axs[i, 1], label='y=0', shade=shade, color='darkslateblue')
axs[i, 1].set_title(f'Score Distribution for {model_name} - Validation')
axs[i, 1].legend()
axs[i, 1].set_xlabel('Score')
format_spines(axs[i, 1], right_border=False)
i += 1
except Exception as e:
logger.error(f'Error on returning curve for {model_name}. Exception: {e}')
continue
# Tightening layout
plt.tight_layout()
# Saving image if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'score_distribution.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
def plot_score_bins(self, bin_range=.20, **kwargs):
"""
Plots a distribution score analysis splitted on categorical bins.
Parameters
----------
:param bin_range: range for score bins [type: float, default=.20]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='score_bins.png']
Return
------
This method doesn't return anything but a custom chart for visualizing scores at different bins
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_score_distribution(output_path=OUTPUT_PATH)
"""
logger.debug('Initializing score analysis on categorical bins for trained models')
i = 0
nrows = len(self.classifiers_info.keys())
fig1, axs1 = plt.subplots(nrows=nrows, ncols=2, figsize=(16, nrows * 4))
fig2, axs2 = plt.subplots(nrows=nrows, ncols=2, figsize=(16, nrows * 4))
# Creating a list of bins
bins = np.arange(0, 1.01, bin_range)
bins_labels = [str(round(list(bins)[i - 1], 2)) + ' to ' + str(round(list(bins)[i], 2)) for i in range(len(bins)) if i > 0]
# Iterating over trained models on the class attribute
for model_name, model_info in self.classifiers_info.items():
logger.debug(f'Returning parameters for {model_name}')
try:
# Retrieving the train scores and creating a DataFrame
train_scores = model_info['train_scores']
y_train = model_info['model_data']['y_train']
df_train_scores = pd.DataFrame({})
df_train_scores['scores'] = train_scores
df_train_scores['target'] = y_train
df_train_scores['faixa'] = pd.cut(train_scores, bins, labels=bins_labels)
# Computing the distribution for each bin
df_train_rate = pd.crosstab(df_train_scores['faixa'], df_train_scores['target'])
df_train_percent = df_train_rate.div(df_train_rate.sum(1).astype(float), axis=0)
# Retrieving val scores and creating a DataFrame
val_scores = model_info['val_scores']
y_val = model_info['model_data']['y_val']
df_val_scores = pd.DataFrame({})
df_val_scores['scores'] = val_scores
df_val_scores['target'] = y_val
df_val_scores['faixa'] = pd.cut(val_scores, bins, labels=bins_labels)
# Computing the distribution for each bin
df_val_rate = pd.crosstab(df_val_scores['faixa'], df_val_scores['target'])
df_val_percent = df_val_rate.div(df_val_rate.sum(1).astype(float), axis=0)
except Exception as e:
logger.error(f'Error on returning and computing parameters for {model_name}. Exception: {e}')
continue
logger.debug(f'Plotting score distribution on bins for {model_name}')
try:
sns.countplot(x='faixa', data=df_train_scores, ax=axs1[i, 0], hue='target', palette=['darkslateblue', 'crimson'])
sns.countplot(x='faixa', data=df_val_scores, ax=axs1[i, 1], hue='target', palette=['darkslateblue', 'crimson'])
# Formatting legend and titles
axs1[i, 0].legend(loc='upper right')
axs1[i, 1].legend(loc='upper right')
axs1[i, 0].set_title(f'Score Distribution on Bins for {model_name} - Training', size=14)
axs1[i, 1].set_title(f'Score Distribution on Bins for {model_name} - Validation', size=14)
# Adding up data labels
AnnotateBars(n_dec=0, color='black', font_size=12).vertical(axs1[i, 0])
AnnotateBars(n_dec=0, color='black', font_size=12).vertical(axs1[i, 1])
# Formatting axis
format_spines(axs1[i, 0], right_border=False)
format_spines(axs1[i, 1], right_border=False)
logger.debug(f'Plotting percentual analysis on bins for {model_name}')
for df_percent, ax in zip([df_train_percent, df_val_percent], [axs2[i, 0], axs2[i, 1]]):
df_percent.plot(kind='bar', ax=ax, stacked=True, color=['darkslateblue', 'crimson'], width=0.6)
for p in ax.patches:
# Collecting parameters for adding data labels
height = p.get_height()
width = p.get_width()
x = p.get_x()
y = p.get_y()
# Formatting parameters
label_text = f'{round(100 * height, 1)}%'
label_x = x + width - 0.30
label_y = y + height / 2
ax.text(label_x, label_y, label_text, ha='center', va='center', color='white',
fontweight='bold', size=10)
format_spines(ax, right_border=False)
# Formatting legend and title
axs2[i, 0].set_title(f'Score Distribution on Bins (Percent) for {model_name} - Training', size=14)
axs2[i, 1].set_title(f'Score Distribution on Bins (Percent) for {model_name} - Validation', size=14)
i += 1
except Exception as e:
logger.error(f'Error on plotting score distribution on bins for {model_name}. Exception: {e}')
continue
# Tightening layout
fig1.tight_layout()
fig2.tight_layout()
# Saving image
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
save_fig(fig1, output_path, img_name='score_bins.png')
save_fig(fig2, output_path, img_name='score_bins_percent.png')
def plot_learning_curve(self, ylim=None, cv=5, n_jobs=3, train_sizes=np.linspace(.1, 1.0, 10), **kwargs):
"""
Plots an excellent chart for analysing a learning curve for trained models.
Parameters
----------
:param ylim: vertical axis limit [type: int, default=None]
:param cv: K-folds used on cross validation [type: int, default=5]
:param n_jobs: CPUs vcores for processing [type: int, default=3]
:param train_sizes: array with steps for measuring performance [type: np.array, default=np.linspace(.1, 1.0, 10)]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='learning_curve.png']
Return
------
This method doesn't return anything but the learning curve chart
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_learning_curve()
"""
logger.debug(f'Initializing plots for learning curves for trained models')
i = 0
nrows = len(self.classifiers_info.keys())
fig, axs = plt.subplots(nrows=nrows, figsize=(16, nrows * 6))
# Iterating over each model in class attribute
for model_name, model_info in self.classifiers_info.items():
ax = axs[i]
logger.debug(f'Returning parameters for {model_name} and applying learning_curve method')
try:
model = model_info['estimator']
X_train = model_info['model_data']['X_train']
y_train = model_info['model_data']['y_train']
# Calling learning_curve function for returning scores for training and validation
train_sizes, train_scores, val_scores = learning_curve(model, X_train, y_train, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
# Computing means and standard deviations (training and validation)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
val_scores_mean = np.mean(val_scores, axis=1)
val_scores_std = np.std(val_scores, axis=1)
except Exception as e:
logger.error(f'Error on returning parameters and applying learning curve for {model_name}. Exception: {e}')
continue
logger.debug(f'Plotting learning curves for training and validation data for {model_name}')
try:
# Results on training data
ax.plot(train_sizes, train_scores_mean, 'o-', color='navy', label='Training Score')
ax.fill_between(train_sizes, (train_scores_mean - train_scores_std), (train_scores_mean + train_scores_std),
alpha=0.1, color='blue')
# Results on validation data
ax.plot(train_sizes, val_scores_mean, 'o-', color='red', label='Cross Val Score')
ax.fill_between(train_sizes, (val_scores_mean - val_scores_std), (val_scores_mean + val_scores_std),
alpha=0.1, color='crimson')
# Customizing the plot
ax.set_title(f'Model {model_name} - Learning Curve', size=14)
ax.set_xlabel('Training size (m)')
ax.set_ylabel('Score')
ax.grid(True)
ax.legend(loc='best')
except Exception as e:
logger.error(f'Error on plotting learning curve for {model_name}. Exception: {e}')
continue
i += 1
# Tightening layout
plt.tight_layout()
# Saving image
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'learning_curve.png'
save_fig(fig, output_path=output_path, img_name=output_filename)
def plot_shap_analysis(self, model_name, features, figsize=(16, 10), **kwargs):
"""
Plots an useful shap analysis for interpreting a specific model
Parameters
----------
:param model_name: a key for extracting an estimator from classifier info dict class attribute [type: string]
:param features: list of features used on training the model [type: list]
:param figsize: figure size [type: tuple, default=(16, 10)]
:param **kwargs: additional parameters
:arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/imgs']
:arg output_filename: name of png file to be saved [type: string, default='shap_analysis_modelname.png']
Return
------
This method doesn't return anything but the plot of the shap analysis (violin)
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.plot_shap_analysis(model_name='LightGBM', features=MODEL_FEATURES)
"""
logger.debug(f'Explaining {model_name} through a violin plot on shap analysis')
try:
model_info = self.classifiers_info[model_name]
model = model_info['estimator']
except Exception as e:
logger.error(f'Model key {model_name} does not exist or the model was not trained. Available options: {list(self.classifiers_info.keys())}')
return
logger.debug(f'Returning parameters for {model_name}')
try:
# Returning model parameters
X_train = model_info['model_data']['X_train']
X_val = model_info['model_data']['X_val']
df_train = pd.DataFrame(X_train, columns=features)
df_val = pd.DataFrame(X_val, columns=features)
except Exception as e:
logger.error(f'Error on returning parameters for {model_name}. Exception: {e}')
        logger.debug(f'Creating an explainer and generating shap values for {model_name}')
try:
explainer = shap.TreeExplainer(model, df_train)
shap_values = explainer.shap_values(df_val)
except Exception as e:
try:
                logger.warning(f'TreeExplainer is not applicable to {model_name}. Trying LinearExplainer')
explainer = shap.LinearExplainer(model, df_train)
shap_values = explainer.shap_values(df_val, check_additivity=False)
except Exception as e:
                logger.error(f'Error on creating a shap explainer for {model_name}. Exception: {e}')
return
logger.debug(f'Making a shap analysis violin plot for {model_name}')
try:
fig, ax = plt.subplots(figsize=figsize)
try:
shap.summary_plot(shap_values, df_val, plot_type='violin', show=False)
except Exception as e:
shap.summary_plot(shap_values[1], df_val, plot_type='violin', show=False)
plt.title(f'Shap Analysis (violin) for {model_name}')
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/imgs')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else f'shap_analysis_{model_name}.png'
save_fig(fig, output_path, img_name=output_filename)
except Exception as e:
logger.error(f'Error on plotting shap analysis for {model_name}. Exception: {e}')
return
def visual_analysis(self, features, metrics=True, feat_imp=True, cfmx=True, roc=True, score_dist=True, score_bins=True,
learn_curve=True, model_shap=None, show=False, save=True, output_path=os.path.join(os.getcwd(), 'output/imgs')):
"""
Makes a complete visual analysis for trained models by executing all individual graphic functions
        sequentially, passing arguments as needed
Parameters
----------
:param features: features list used on models training [type: list]
:param metrics: flag for executing plot_metrics() method [type: bool, default=True]
:param feat_imp: flag for executing plot_feature_importance() method [type: bool, default=True]
:param cfmx: flag for executing plot_confusion_matrix() method [type: bool, default=True]
:param roc: flag for executing plot_roc_curve() method [type: bool, default=True]
:param score_dist: flag for executing plot_score_distribution() method [type: bool, default=True]
:param score_bins: flag for executing plot_score_bins() method [type: bool, default=True]
:param learn_curve: flag for executing plot_learning_curve() method [type: bool, default=True]
:param model_shap: key string for selecting a model for applying shap analysis [type: string, default=None]
        :param show: flag for showing the figures on screen or in a jupyter notebook cell [type: bool, default=False]
:param save: flag for saving figures on local machine [type: bool, default=True]
:param output_path: path for saving files [type: string, default=cwd() + 'output/imgs']
Return
------
        This method doesn't return anything; it only generates the plots according to the argument configuration
Application
-----------
trainer = BinaryClassifier()
trainer.training_flow(set_classifiers, X_train, y_train, X_val, y_val, features)
trainer.visual_analysis(features=MODEL_FEATURES)
"""
# Verifying parameter for showing up figs
backend_ = mpl.get_backend()
if not show:
mpl.use('Agg')
logger.debug(f'Initializing visual analysis for trained models')
try:
# Plotting metrics
if metrics:
self.plot_metrics(save=save, output_path=output_path)
# Plotting feature importances
if feat_imp:
self.plot_feature_importance(features=features, save=save, output_path=output_path)
# Plotting confusion matrix
if cfmx:
self.plot_confusion_matrix(save=save, output_path=output_path)
# Plotting ROC curve
if roc:
self.plot_roc_curve(save=save, output_path=output_path)
# Plotting score distribution
if score_dist:
self.plot_score_distribution(save=save, output_path=output_path)
# Plotting score distribution on bins
if score_bins:
self.plot_score_bins(save=save, output_path=output_path)
# Plotting learning curve
if learn_curve:
self.plot_learning_curve(save=save, output_path=output_path)
# Plotting shap analysis
if model_shap is not None:
self.plot_shap_analysis(save=save, model_name=model_shap, features=features, output_path=output_path)
except Exception as e:
logger.error(f'Error on plotting visual analysis for models. Exception: {e}')
        # Resetting configuration
mpl.use(backend_)
def get_estimator(self, model_name):
"""
Returns the estimator of a selected model
Parameters
----------
:param model_name: key string for extracting the model from classifiers_info class attribute [type: string]
Return
------
:return model: model estimator stored on class attribute [type: estimator]
Application
-----------
model = trainer.get_estimator(model_name='RandomForestClassifier')
"""
logger.debug(f'Returning estimator for model {model_name} stored on class attribute')
try:
model_info = self.classifiers_info[model_name]
return model_info['estimator']
except Exception as e:
            logger.error(f'Key string {model_name} does not exist or was not trained. Options: {list(self.classifiers_info.keys())}')
return
def get_metrics(self, model_name):
"""
Returns metrics computed for a specific model
Parameters
----------
:param model_name: key string for extracting the model from classifiers_info class attribute [type: string]
Return
------
:return metrics: metrics dataset for a specific model [type: DataFrame]
Application
-----------
metrics = trainer.get_metrics(model_name='RandomForestClassifier')
"""
logger.debug(f'Returning metrics computed for {model_name}')
try:
# Returning dictionary class attribute with stored information of model
model_info = self.classifiers_info[model_name]
train_performance = model_info['train_performance']
val_performance = model_info['val_performance']
model_performance = train_performance.append(val_performance)
model_performance.reset_index(drop=True, inplace=True)
return model_performance
except Exception as e:
logger.error(f'Error on returning metrics for {model_name}. Exception: {e}')
def get_model_info(self, model_name):
"""
Returns a complete dictionary with all information for models stored on class attribute
Parameters
----------
:param model_name: key string for extracting the model from classifiers_info class attribute [type: string]
Return
------
:return model_info: dictionary with stored model's informations [type: dict]
model_info = {
'estimator': model,
'train_scores': np.array,
'test_scores': np.array,
'train_performance': pd.DataFrame,
'test_performance': pd.DataFrame,
'model_data': {
'X_train': np.array,
'y_train': np.array,
'X_val': np.array,
'y_val': np.array,
'feature_importances': pd.DataFrame
}
}
Application
-----------
metrics = trainer.get_model_info(model_name='RandomForestClassifier')
"""
logger.debug(f'Returning all information for {model_name}')
try:
            # Returning the model's info dictionary
return self.classifiers_info[model_name]
except Exception as e:
            logger.error(f'Error on returning information for {model_name}. Exception: {e}')
def get_classifiers_info(self):
"""
Returns the class attribute classifiers_info with all information for all models
Parameters
----------
None
Return
------
:return classifiers_info: dictionary with information for all models
classifiers_info ={
'model_name': model_info = {
'estimator': model,
'train_scores': np.array,
'test_scores': np.array,
'train_performance': pd.DataFrame,
'test_performance': pd.DataFrame,
'model_data': {
'X_train': np.array,
'y_train': np.array,
'X_val': np.array,
'y_val': np.array,
'feature_importances': pd.DataFrame
}
}
"""
return self.classifiers_info
"""
---------------------------------------------------
---------------- 2. CLASSIFICATION ----------------
2.2 Multiclass Classification
---------------------------------------------------
"""
class MulticlassClassifier:
"""
Trains and evaluate multiclass classification models.
The methods of this class enable a complete management of
multiclass classification tasks in every step of the development
workflow
"""
def __init__(self, encoded_target=False):
"""
:param encoded_target: flag for encoding or not the target variable [type: bool, default=False]
"""
self.classifiers_info = {}
self.encoded_target = encoded_target
def fit(self, set_classifiers, X_train, y_train, **kwargs):
"""
Trains each classifier in set_classifiers dictionary through a defined setup
Parameters
----------
:param set_classifiers: contains the setup for training the models [type: dict]
set_classifiers = {
'model_name': {
'model': __estimator__,
'params': __estimator_params__
}
}
:param X_train: features for training data [type: np.array]
:param y_train: target array for training data [type: np.array]
:param **kwargs: additional parameters
:arg approach: sufix string for identifying a different approach for models training [type: string, default='']
:arg random_search: boolean flag for applying RandomizedSearchCV on training [type: bool, default=False]
:arg scoring: optimization metric for RandomizedSearchCV (if applicable) [type: string, default='accuracy']
            :arg cv: K-folds used on cross validation evaluation on RandomizedSearchCV [type: int, default=5]
            :arg verbose: verbosity configured on hyperparameter search [type: int, default=-1]
            :arg n_jobs: CPU cores to be used during hyperparameter search [type: int, default=1]
:arg save: flag for saving pkl/joblib files for trained models on local disk [type: bool, default=True]
:arg output_path: folder path for pkl/joblib files to be saved [type: string, default=cwd() + 'output/models']
:arg model_ext: extension for model files (pkl or joblib) without point "." [type: string, default='pkl']
Return
------
        This method doesn't return anything; it populates the self.classifiers_info class attribute with useful info
Application
-----------
# Initializing object and training models
trainer = MulticlassClassifier()
trainer.fit(set_classifiers, X_train_prep, y_train)
"""
# Extracting approach from kwargs dictionary
approach = kwargs['approach'] if 'approach' in kwargs else ''
# Iterating over each model on set_classifiers dictionary
try:
for model_name, model_info in set_classifiers.items():
# Defining a custom key for the further classifiers_info class attribute dictionary
model_key = model_name + approach
logger.debug(f'Training model {model_key}')
model = model_info['model']
# Creating an empty dictionary for storing model's info
self.classifiers_info[model_key] = {}
                # Validating the application of random search for hyperparameter tuning
try:
if 'random_search' in kwargs and bool(kwargs['random_search']):
params = model_info['params']
# Returning additional parameters from kwargs dictionary
scoring = kwargs['scoring'] if 'scoring' in kwargs else 'accuracy'
cv = kwargs['cv'] if 'cv' in kwargs else 5
verbose = kwargs['verbose'] if 'verbose' in kwargs else -1
n_jobs = kwargs['n_jobs'] if 'n_jobs' in kwargs else 1
# Preparing and applying search
rnd_search = RandomizedSearchCV(model, params, scoring=scoring, cv=cv,
verbose=verbose, random_state=42, n_jobs=n_jobs)
logger.debug('Applying RandomizedSearchCV')
rnd_search.fit(X_train, y_train)
# Saving the best model on classifiers_info class dictionary
self.classifiers_info[model_key]['estimator'] = rnd_search.best_estimator_
else:
# Training model without searching for best hyperparameters
self.classifiers_info[model_key]['estimator'] = model.fit(X_train, y_train)
except TypeError as te:
logger.error(f'Error when trying RandomizedSearch. Exception: {te}')
return
# Saving pkl files if applicable
if 'save' in kwargs and bool(kwargs['save']):
model_ext = kwargs['model_ext'] if 'model_ext' in kwargs else 'pkl'
logger.debug(f'Saving model file for {model_name} on {model_ext} format')
model = self.classifiers_info[model_key]['estimator']
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/models')
anomesdia = datetime.now().strftime('%Y%m%d')
filename = model_name.lower() + '_' + anomesdia + '.' + model_ext
save_model(model, output_path=output_path, filename=filename)
except AttributeError as ae:
logger.error(f'Error when training models. Exception: {ae}')
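    # Illustrative setup sketch for the `set_classifiers` argument (the estimators and
    # search spaces below are assumptions for demonstration only, not part of the
    # original pipeline):
    #
    #     from sklearn.linear_model import LogisticRegression
    #     from sklearn.ensemble import RandomForestClassifier
    #
    #     set_classifiers = {
    #         'LogisticRegression': {
    #             'model': LogisticRegression(max_iter=500),
    #             'params': {'C': [0.01, 0.1, 1.0, 10.0]}
    #         },
    #         'RandomForestClassifier': {
    #             'model': RandomForestClassifier(),
    #             'params': {'n_estimators': [100, 300], 'max_depth': [3, 5, None]}
    #         }
    #     }
    #     trainer = MulticlassClassifier()
    #     trainer.fit(set_classifiers, X_train, y_train, random_search=True, cv=5)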
def compute_train_performance(self, model_name, estimator, X, y, cv=5, target_names=None):
"""
Applies cross validation for returning the main classification metrics for trained models.
In practice, this method is usually applied on a top layer of the class, or in other words, it is usually
executed by another method for extracting metrics on training and validating data
Parameters
----------
:param model_name: model key on self.classifiers_info class attribute [type: string]
:param estimator: model estimator to be evaluated [type: object]
:param X: model features for training data [type: np.array]
:param y: target array for training data [type: np.array]
:param cv: K-folds used on cross validation step [type: int, default=5]
:param target_names: custom names for target indices [type: list, default=None]
Return
------
:return train_performance: dataset with metrics computed using cross validation on training set [type: pd.DataFrame]
Application
-----------
# Initializing and training models
trainer = MulticlassClassifier()
trainer.fit(model, X_train, y_train)
train_performance = trainer.compute_train_performance(model_name, estimator, X_train, y_train)
"""
# Computing metrics using cross validation
logger.debug(f'Computing metrics on {model_name} using cross validation with {cv} K-folds')
try:
            # Initializing time measurement and making predictions using cross validation
t0 = time.time()
y_pred = cross_val_predict(estimator, X, y, cv=cv)
# Generating a classification report
cr = pd.DataFrame(classification_report(y, y_pred, output_dict=True, target_names=target_names)).T
# Handling accuracy based on encoded_target class attribute
if self.encoded_target:
n_classes = len(cr) - 4
acc = [accuracy_score(y.T[i], y_pred.T[i]) for i in range(n_classes)]
else:
n_classes = len(cr) - 3
acc = cr.loc['accuracy', :].values
acc = [acc[0]] * n_classes
# Customizing classification report
cr_custom = cr.iloc[:n_classes, :-1]
cr_custom['model'] = model_name
cr_custom.reset_index(inplace=True)
cr_custom.columns = ['class'] + list(cr_custom.columns[1:])
# Computing accuracy for each class
cr_custom['accuracy'] = acc
            cr_custom['approach'] = f'Training {cv} K-folds'
cr_cols = ['model', 'approach', 'class', 'accuracy', 'precision', 'recall', 'f1-score']
train_performance = cr_custom.loc[:, cr_cols]
# Adding up time measurement on final DataFrame
t1 = time.time()
delta_time = t1 - t0
train_performance['total_time'] = round(delta_time, 3)
            logger.info(f'Successfully computed metrics on training data in {round(delta_time, 3)} seconds')
return train_performance
except Exception as e:
logger.error(f'Error on computing metrics. Exception: {e}')
def compute_val_performance(self, model_name, estimator, X, y, target_names=None):
"""
Computes metrics on validation datasets for multiclass classifiers.
In practice, this method is usually applied on a top layer of the class, or in other words, it is usually
executed by another method for extracting metrics on training and validating data
Parameters
----------
:param model_name: model key on self.classifiers_info class attribute [type: string]
:param estimator: model estimator to be evaluated [type: object]
:param X: model features for validation data [type: np.array]
:param y: target array for validation data [type: np.array]
:param target_names: custom names for target indices [type: list, default=None]
Return
------
:return val_performance: dataset with metrics computed using on validation set [type: pd.DataFrame]
Application
-----------
# Initializing and training models
trainer = MulticlassClassifier()
trainer.fit(model, X_train, y_train)
val_performance = trainer.compute_val_performance(model_name, estimator, X_val, y_val)
"""
# Computing metrics
logger.debug(f'Computing metrics on {model_name} using validation data')
try:
            # Initializing time measurement and making predictions
t0 = time.time()
y_pred = estimator.predict(X)
# Generating a classification report
cr = pd.DataFrame(classification_report(y, y_pred, output_dict=True, target_names=target_names)).T
# Extracting accuracy based on encoded_target class attribute
if self.encoded_target:
n_classes = len(cr) - 4
acc = [accuracy_score(y.T[i], y_pred.T[i]) for i in range(n_classes)]
else:
n_classes = len(cr) - 3
acc = cr.loc['accuracy', :].values
acc = [acc[0]] * n_classes
# Customizing classification report
cr_custom = cr.iloc[:n_classes, :-1]
cr_custom['model'] = model_name
cr_custom.reset_index(inplace=True)
cr_custom.columns = ['class'] + list(cr_custom.columns[1:])
# Computing accuracy for each class
cr_custom['accuracy'] = acc
cr_custom['approach'] = f'Validation set'
cr_cols = ['model', 'approach', 'class', 'accuracy', 'precision', 'recall', 'f1-score']
val_performance = cr_custom.loc[:, cr_cols]
# Adding up time measurement on DataFrame
t1 = time.time()
delta_time = t1 - t0
val_performance['total_time'] = round(delta_time, 3)
            logger.info(f'Successfully computed metrics on validation data in {round(delta_time, 3)} seconds')
return val_performance
except Exception as e:
logger.error(f'Error on computing metrics. Exception: {e}')
def evaluate_performance(self, X_train, y_train, X_val, y_val, cv=5, target_names=None, **kwargs):
"""
Computes classification metrics for training and validation data
Parameters
----------
:param X_train: model features for training data [type: np.array]
:param y_train: target array for training data [type: np.array]
:param X_val: model features for validation data [type: np.array]
:param y_val: target array for validation data [type: np.array]
:param cv: K-folds used on cross validation step [type: int, default=5]
:param target_names: custom names for target indices [type: list, default=None]
:param **kwargs: additional parameters
            :arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/metrics']
:arg output_filename: name of csv file to be saved [type: string, default='metrics.csv']
Return
------
:return df_performances: dataset with metrics obtained using training and validation data [type: pd.DataFrame]
Application
-----------
# Training model and evaluating performance on training and validation sets
trainer = MulticlassClassifier()
trainer.fit(estimator, X_train, y_train)
# Generating a performance dataset
df_performance = trainer.evaluate_performance(X_train, y_train, X_val, y_val)
"""
# Creating an empty DataFrame for storing metrics
df_performances = pd.DataFrame({})
# Iterating over the trained classifiers on the class attribute dictionary
for model_name, model_info in self.classifiers_info.items():
# Verifying if the model was already trained (model_info dict will have the key 'train_performance')
if 'train_performance' in model_info.keys():
df_performances = df_performances.append(model_info['train_performance'])
df_performances = df_performances.append(model_info['val_performance'])
continue
# Returning the model to be evaluated
try:
estimator = model_info['estimator']
except KeyError as e:
logger.error(f'Error on returning the key "estimator" from model_info dict. Model {model_name} was not trained')
continue
# Computing performance on training and validation sets
train_performance = self.compute_train_performance(model_name, estimator, X_train, y_train, cv=cv,
target_names=target_names)
val_performance = self.compute_val_performance(model_name, estimator, X_val, y_val,
target_names=target_names)
# Setting up results on classifiers_info class dict
self.classifiers_info[model_name]['train_performance'] = train_performance
self.classifiers_info[model_name]['val_performance'] = val_performance
# Building a DataFrame with model metrics
model_performance = train_performance.append(val_performance)
df_performances = df_performances.append(model_performance)
df_performances['anomesdia_datetime'] = datetime.now()
# Saving some attributes on classifiers_info for maybe retrieving in the future
model_data = {
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val
}
model_info['model_data'] = model_data
# Saving results if applicable
if 'save' in kwargs and bool(kwargs['save']):
output_path = kwargs['output_path'] if 'output_path' in kwargs else os.path.join(os.getcwd(), 'output/metrics')
output_filename = kwargs['output_filename'] if 'output_filename' in kwargs else 'metrics.csv'
save_data(df_performances, output_path=output_path, filename=output_filename)
return df_performances
def feature_importance(self, features, top_n=-1, **kwargs):
"""
Extracts the feature importance method from trained models
Parameters
----------
:param features: list of features considered on training step [type: list]
:param top_n: parameter for filtering just top N features most important [type: int, default=-1]
*obs: when this parameter is equal to -1, all features are considered
:param **kwargs: additional parameters
            :arg save: boolean flag for saving files on local disk [type: bool, default=True]
:arg output_path: path for files to be saved [type: string, default=cwd() + 'output/metrics']
:arg output_filename: name of csv file to be saved [type: string, default='top_features.csv']
Return
------
        :return all_feat_imp: pandas DataFrame with the feature importance analysis of the models [type: pd.DataFrame]
Application
-----------
# Training models
trainer = MulticlassClassifier()
trainer.fit(estimator, X_train, y_train)
# Returning a feature importance dataset for all models at once
feat_imp = trainer.feature_importance(features=MODEL_FEATURES, top_n=20)
"""
# Creating an empty DataFrame for storing feature importance analysis
feat_imp = pd.DataFrame({})
all_feat_imp = | pd.DataFrame({}) | pandas.DataFrame |
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import Series, Timestamp
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize("val,expected", [
(2**63 - 1, 3),
(2**63, 4),
])
def test_loc_uint64(val, expected):
# see gh-19399
s = Series({2**63 - 1: 3, 2**63: 4})
assert s.loc[val] == expected
def test_loc_getitem(test_data):
inds = test_data.series.index[[3, 4, 7]]
assert_series_equal(
test_data.series.loc[inds],
test_data.series.reindex(inds))
assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])
# slice with indices
d1, d2 = test_data.ts.index[[5, 15]]
result = test_data.ts.loc[d1:d2]
expected = test_data.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = test_data.series > test_data.series.median()
assert_series_equal(test_data.series.loc[mask], test_data.series[mask])
# ask for index value
assert test_data.ts.loc[d1] == test_data.ts[d1]
assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
d1, d2 = test_data.ts.index[[5, 15]]
ts2 = test_data.ts[::2][[1, 2, 0]]
msg = r"Timestamp\('2000-01-10 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2]
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + | lrange(5, 10) | pandas.compat.lrange |
"""
Dates and Times
- A special type of categorical variable is one that, instead of taking traditional labels like color (blue, red)
  or city (London, Manchester), takes dates and/or times as values.
- For example, date of birth ('29-08-1987', '12-01-2012'), or date of application ('2016-Dec', '2013-March').
- Datetime variables can contain dates only, time only, or date and time.
We don't usually work with datetime variables in their raw format because:
- Date variables contain a huge number of different categories
- We can extract much more information from datetime variables by preprocessing them correctly
In addition,
- often, date variables will contain dates that were not present in the dataset used to train the machine learning model.
- In fact, date variables will usually contain dates placed in the future with respect to the dates in the training dataset.
- Therefore, the machine learning model will not know what to do with them, because it never saw them while being trained.
=============================================================================
In this example, we will use data from the peer-to-peer finance company Lending Club to inspect datetime variables
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# let's load the Lending Club dataset with a few selected columns
# Variable definitions:
#-------------------------
# loan_amnt: loan amount requested by borrower
# grade: risk markets in which borrowers are placed
# purpose: intended use of the loan
# issue_d: date the loan was issued
# last_pymnt_d: date of last payment towards repaying the loan
use_cols = ['loan_amnt', 'grade', 'purpose', 'issue_d', 'last_pymnt_d']
data = pd.read_csv('dataset/loan.csv/loan.csv', usecols=use_cols)
data.head()
"""
loan_amnt grade issue_d purpose last_pymnt_d
0 5000.0 B Dec-2011 credit_card Jan-2015
1 2500.0 C Dec-2011 car Apr-2013
2 2400.0 C Dec-2011 small_business Jun-2014
3 10000.0 C Dec-2011 other Jan-2015
4 3000.0 B Dec-2011 other Jan-2016 """
# pandas assigns type 'object' when reading dates and considers them strings.
# Let's have a look
data.dtypes
"""
loan_amnt float64
grade object
issue_d object
purpose object
last_pymnt_d object
dtype: object """
"""
OBSERVATIONS:
- Both issue_d and last_pymnt_d are cast as objects. Therefore, pandas will treat them as strings or categorical variables.
- In order to instruct pandas to treat them as dates, we need to re-cast them into datetime format. See below.
"""
# now let's parse the dates, currently coded as strings, into datetime format
# this will allow us to make some analysis afterwards
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['last_pymnt_dt'] = | pd.to_datetime(data.last_pymnt_d) | pandas.to_datetime |
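# A minimal follow-on sketch (not part of the original notebook): once the variables are
# parsed as datetime, additional features can be derived with the pandas .dt accessor.
# The derived column names below are illustrative choices, not columns of the source data.
data['issue_year'] = data['issue_dt'].dt.year
data['issue_month'] = data['issue_dt'].dt.month
data['days_issue_to_last_pymnt'] = (data['last_pymnt_dt'] - data['issue_dt']).dt.days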
'''
This is a follow up of https://letianzj.github.io/portfolio-management-one.html
It backtests four portfolios: GMV, tangent, maximum diversification and risk parity
and compare them with equally-weighted portfolio
'''
import os
import numpy as np
import pandas as pd
from datetime import datetime
import backtrader as bt
from scipy.optimize import minimize
from IPython.core.display import display, HTML
# set browser full width
display(HTML("<style>.container { width:100% !important; }</style>"))
class EndOfMonth(object):
def __init__(self, cal):
self.cal = cal
def __call__(self, d):
if self.cal.last_monthday(d):
return True
return False
# ------------------ help functions -------------------------------- #
def minimum_vol_obj(wo, cov):
w = wo.reshape(-1, 1)
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
return sig_p
def maximum_sharpe_negative_obj(wo, mu_cov):
w = wo.reshape(-1, 1)
mu = mu_cov[0].reshape(-1, 1)
cov = mu_cov[1]
obj = np.matmul(w.T, mu)[0, 0]
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
obj = -1 * obj/sig_p
return obj
def maximum_diversification_negative_obj(wo, cov):
w = wo.reshape(-1, 1)
w_vol = np.matmul(w.T, np.sqrt(np.diag(cov).reshape(-1, 1)))[0, 0]
port_vol = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0]
ratio = w_vol / port_vol
return -ratio
# this is also used to verify rc from optimal w
def calc_risk_contribution(wo, cov):
w = wo.reshape(-1, 1)
sigma = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0]
mrc = np.matmul(cov, w)
rc = (w * mrc) / sigma # element-wise multiplication
return rc
def risk_budget_obj(wo, cov_wb):
w = wo.reshape(-1, 1)
cov = cov_wb[0]
wb = cov_wb[1].reshape(-1, 1) # target/budget in percent of portfolio risk
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
risk_target = sig_p * wb
asset_rc = calc_risk_contribution(w, cov)
f = np.sum(np.square(asset_rc - risk_target.T)) # sum of squared error
return f
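# Illustrative sanity check for the risk-budget objective above (a sketch with assumed
# toy numbers, not part of the original backtest): solving a two-asset problem with an
# equal risk budget should yield weights whose risk contributions are roughly equal.
def _risk_parity_sanity_check():
    cov_toy = np.array([[0.04, 0.01], [0.01, 0.09]])  # assumed covariance matrix
    w0 = np.ones(2) / 2
    wb = np.ones(2) / 2  # equal risk budget
    cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0},
            {'type': 'ineq', 'fun': lambda w: w})
    res = minimize(risk_budget_obj, w0, args=[cov_toy, wb], method='SLSQP',
                   constraints=cons, tol=1e-12)
    # the two entries of the risk-contribution vector should be close to each other
    return res.x, calc_risk_contribution(res.x, cov_toy)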
class PortfolioOptimization(bt.Strategy):
params = (
('nlookback', 200),
('model', 'gmv'), # gmv, sharpe, diversified, risk_parity
('printlog', False), # comma is required
)
def __init__(self):
self.buyprice = None
self.buycomm = None
self.bar_executed = None
self.val_start = None
self.add_timer(
when=bt.Timer.SESSION_START, # before next
allow=EndOfMonth(cal = bt.TradingCalendar())
)
def log(self, txt, dt=None, doprint=False):
        ''' Logging function for this strategy'''
if self.params.printlog or doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def start(self):
self.val_start = self.broker.get_cash() # keep the starting cash
print(f'================================== start portfolio {self.p.model} ======================================')
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' % (trade.pnl, trade.pnlcomm))
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]: # order.Partial
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Size: %.0f, Cost: %.2f, Comm %.2f, RemSize: %.0f, RemCash: %.2f' %
(order.executed.price,
order.executed.size,
order.executed.value,
order.executed.comm,
order.executed.remsize,
self.broker.get_cash()))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Size: %.0f, Cost: %.2f, Comm %.2f, RemSize: %.0f, RemCash: %.2f' %
(order.executed.price,
order.executed.size,
order.executed.value,
order.executed.comm,
order.executed.remsize,
self.broker.get_cash()))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Expired, order.Margin, order.Rejected]:
self.log('Order Failed')
def next(self):
pass
def notify_timer(self, timer, when, *args, **kwargs):
print('{} strategy notify_timer with tid {}, when {} cheat {}'.
format(self.data.datetime.datetime(), timer.p.tid, when, timer.p.cheat))
if len(self.datas[0]) < self.p.nlookback: # not enough bars
return
total_value = self.broker.getvalue()
i = 0
prices = None
for data in self.datas:
price = data.close.get(0, self.p.nlookback)
price = np.array(price)
if i == 0:
prices = price
else:
prices = np.c_[prices, price]
i += 1
rets = prices[1:,:]/prices[0:-1, :]-1.0
mu = np.mean(rets, axis=0)
cov = np.cov(rets.T)
n_stocks = len(self.datas)
TOL = 1e-12
w = np.ones(n_stocks) / n_stocks # default
try:
if self.p.model == 'gmv':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0}, {'type': 'ineq', 'fun': lambda w: w})
res = minimize(minimum_vol_obj, w0, args=cov, method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
if not res.success:
self.log(f'{self.p.model} Optimization failed')
w = res.x
elif self.p.model == 'sharpe':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0}, {'type': 'ineq', 'fun': lambda w: w})
res = minimize(maximum_sharpe_negative_obj, w0, args=[mu, cov], method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
elif self.p.model == 'diversified':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}) # weights sum to one
bnds = tuple([(0, 1)] * n_stocks)
res = minimize(maximum_diversification_negative_obj, w0, bounds=bnds, args=cov, method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
elif self.p.model == 'risk_parity':
w0 = np.ones(n_stocks) / n_stocks
w_b = np.ones(n_stocks) / n_stocks # risk budget/target, percent of total portfolio risk (in this case equal risk)
# bnds = ((0,1),(0,1),(0,1),(0,1)) # alternative, use bounds for weights, one for each stock
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}, {'type': 'ineq', 'fun': lambda x: x})
res = minimize(risk_budget_obj, w0, args=[cov, w_b], method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
except Exception as e:
self.log(f'{self.p.model} Optimization failed; {str(e)}')
stock_value = total_value * 0.95
i = 0
for data in self.datas:
target_pos = (int)(stock_value * w[i] / data.close[0])
self.order_target_size(data=data, target=target_pos)
self.log('REBALANCE ORDER SENT, %s, Price: %.2f, Percentage: %.2f, Target Size: %.2f' %
(data._name,
data.close[0],
w[i],
target_pos))
i += 1
def stop(self):
# calculate the actual returns
print(self.analyzers)
roi = (self.broker.get_value() / self.val_start) - 1.0
self.log('ROI: {:.2f}%'.format(100.0 * roi))
self.log(f'{self.p.model} ending Value {self.broker.getvalue():.2f}', doprint=True)
if __name__ == '__main__':
param_opt = False
perf_eval = True
initial_capital = 100000.0
etfs = ['SPY', 'EFA', 'TIP', 'GSG', 'VNQ']
benchmark = etfs
strategies = ['gmv', 'sharpe', 'diversified', 'risk_parity']
dict_results = dict()
for sname in strategies:
dict_results[sname] = dict()
cerebro = bt.Cerebro()
# Add the Data Feed to Cerebro
# SPY: S&P 500
# EFA: MSCI EAFE
# TIP: UST
# GSG: GSCI
# VNQ: REITs
for s in etfs:
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=os.path.join('../data/', f'{s}.csv'),
fromdate=datetime(2010, 1, 1),
todate=datetime(2019, 12, 31),
reverse=False)
cerebro.adddata(data, name=s)
# Set our desired cash start
cerebro.broker.setcash(initial_capital)
# Set the commission - 0.1% ... divide by 100 to remove the %
cerebro.broker.setcommission(commission=0.001)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Add a strategy
cerebro.addstrategy(PortfolioOptimization, model=sname, printlog=True)
# Add Analyzer
cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='SharpeRatio')
cerebro.addanalyzer(bt.analyzers.DrawDown, _name='DrawDown')
cerebro.addanalyzer(bt.analyzers.PositionsValue, _name='positions', cash=True)
cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
# Run over everything
results = cerebro.run()
# Print out the final result
strat = results[0]
print('Final Portfolio Value: %.2f, Sharpe Ratio: %.2f, DrawDown: %.2f, MoneyDown %.2f' %
(cerebro.broker.getvalue(),
strat.analyzers.SharpeRatio.get_analysis()['sharperatio'],
strat.analyzers.DrawDown.get_analysis()['drawdown'],
strat.analyzers.DrawDown.get_analysis()['moneydown']))
pyfoliozer = strat.analyzers.getbyname('pyfolio')
returns, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
        # somehow the pyfolio analyzer doesn't handle multi-asset portfolios well
df_positions = pd.DataFrame.from_dict(strat.analyzers.positions.get_analysis(), orient='index')
df_positions.columns = etfs+['cash']
returns = returns[transactions.index[0]:] # count from first trade
# returns.index = returns.index.tz_localize(None) # # remove tzinfo; tz native
df_positions.index = df_positions.index.map(lambda x: datetime.combine(x, datetime.min.time()))
df_positions.index = df_positions.index.tz_localize('UTC')
df_positions = df_positions.loc[returns.index]
# save immediate results
dict_results[sname]['returns'] = returns
dict_results[sname]['positions'] = df_positions
dict_results[sname]['transactions'] = transactions
# Compare four portfolios with equal weighted
import matplotlib.pyplot as plt
import empyrical as ep
import pyfolio as pf
bm_ret = None
df_constituents = pd.DataFrame()
for s in etfs:
datapath = os.path.join('../data/', f'{s}.csv')
df_temp = pd.read_csv(datapath, index_col=0)
df_temp = df_temp['Adj Close']
df_temp.name = s
df_constituents = pd.concat([df_constituents, df_temp], axis=1)
df_constituents_ret = df_constituents.pct_change()
df_constituents_ret.index = | pd.to_datetime(df_constituents_ret.index) | pandas.to_datetime |
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = | pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']}) | pandas.DataFrame |
# importing the required modules
import requests
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
import time
options = webdriver.ChromeOptions()
options.add_argument("headless")
pd.options.display.max_columns = 999
pd.options.display.max_rows = 999
df = pd.read_csv('New_profile_links.csv')
pl = list(df["0"])
basic_data = []
main_data = []
product_review = []
profile_all = []
#all_project = []
review_all = []
main_category = []
urls = []
project_data = []
length = len(pl)
for i in range(length):
try:
one_user = []
print("Serial :",i)
url = pl[i]
print(url)
urls.append(url)
driver = webdriver.Chrome(r'C:\Users\David\chromedriver_win32\chromedriver.exe', chrome_options=options)
driver.get(url)
time.sleep(5)
######################################### basic data section ########################################
#basic informations
basic = []
source_code = driver.page_source
soup = BeautifulSoup(source_code, 'html.parser')
except:
print("Error in webdriver")
continue
try:
#name
name = soup.find_all('strong', class_ = 'userName--1ZA07')
for n in name:
basic.append(n.text)
#category
category = soup.find_all('strong', class_ = 'introCategory--F81Ky')
for e in category:
basic.append(e.text)
#specialty
ba = []
sp = soup.find_all('div', class_ = 'categoryName--1zWtA')
for m in sp:
ba.append(m.text)
basic.append(ba)
#rating
rating = soup.find_all('div', class_ = 'itemRating--360UA itemRating--2-rFv typeLarge--1cEMN')
for k in rating:
km = k.text
basic.append(km.replace("평균 평점", ""))
#Reviews and consultations
reviews = soup.find_all('span', class_ = 'statsNum--32OX2')
for kk in reviews:
basic.append(kk.text)
#appending basic data of all user
basic_data.append(basic)
######################################### main ########################################
### main info data for one user
maininfo = []
uh = ["대표자","상호명","사업자등록번호","통신판매업번호", "사업장 주소", "고객센터",'메일']
#main section info
nn = []
infos = soup.find_all('ul', class_ = 'productInfoList--1-H-D')
for f in infos:
li = f.find_all('li')
#each list item
for i in range(len(li)):
ii = li[i]
val = uh[i]
head = ii.find_all("span", class_ = "title--2YCH3")
maini = ii.find_all("span", class_ = "text--1z2Eb")
for h in head:
if h.text != val:
if [k, " "] not in nn:
nn.append("NA")
else:
for j in maini:
if j.text not in nn:
if j.text == None or j.text == "" or j.text == " ":
nn.append("NA")
else:
nn.append(j.text)
main_data.append(nn)
######################################### count product section ########################################
#count product and review section
products = []
tt = soup.find_all('div', class_ = "list--e6w5E")
for t in tt:
cc = t.find_all('a', class_='item--3Oz2i')
for cd in cc:
ce = cd.find_all('div', class_ = "count--2w5o6")
for i in ce:
products.append(i.text)
product_review.append(products)
######################################### Profile data section ########################################
#profile informations
profile_heading = []
profile_text = []
firm_name = []
firm_text = []
div = soup.find_all('div', class_ = 'sectionIntroduce--3_qQB')
for heading in div:
indiv = heading.find_all('div', class_ = 'introduceMain--g3aND')
for i in indiv:
head = i.find_all('strong', class_ = 'introduceMainTitle--2MZc-')
for h in head:
profile_heading.append(h.text)
text = i.find_all('p', class_ = 'introduceText--2R5pY')
for ii in text:
profile_text.append(ii.text)
careerdiv = soup.find_all('div', class_ = ['profileCareer--3_uFh','isExpert--2GkDA'])
for i in careerdiv:
cd = i.find_all('div', class_ = 'profileBox--1jlog')
for j in cd:
cd = j.find_all('div', class_ = 'careerJob--2-hX4')
for c in cd:
firm_name.append(c.text)
cui = j.find_all('ul', class_ = 'careerList--2dpZg')
for cc in cui:
firm_text.append(cc.text)
profile_all.append([profile_heading, profile_text, firm_name, firm_text])
######################################### Project data section ########################################
### Project data for one user
projects = soup.find_all('div', class_ = 'listArea--peDdh')
#projects and consultations
all_project = []
for y in projects:
one = []
yy = y.find_all('div', class_ = 'item--1ZJSx')
for t in yy:
project_item = []
tdiv = t.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY'])
for td in tdiv:
project_title = td.text
project_item.append(project_title)
ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on'])
for rd in ratdiv:
ratscore = rd.find_all("div", class_ = "itemRating--360UA")
for r in ratscore:
b = r.text
if "평균 평점" in b:
b = b.replace("평균 평점", " ")
project_item.append(b)
ratreview = rd.find_all("div", class_ = "itemCount--2HsJv")
for rr in ratreview:
c = rr.text
if "후기" in c:
c = c.replace("후기", " ")
project_item.append(c)
feediv = t.find_all('span', class_ =['priceInner--1HE2v'])
for fd in feediv:
fee = fd.find_all("span", class_=["priceNum--1rXJI","ItemPrice_priceNum--2OFHI"])
for f in fee:
project_item.append(f.text)
discount = fd.find_all("em", class_="discountPercent--3n0bl")
for dis in discount:
project_item.append(dis.text)
actualPrize = fd.find_all("span", class_="beforeDiscount--W1C4G")
for fp in actualPrize:
project_item.append(fp.text)
one.append([*project_item])
all_project.append([*one])
proj = []
for i in range(len(all_project)):
data = all_project[i]
for j in range(len(data)):
dj = data[j]
for k in range(len(dj)):
bb = dj[k]
proj.append(bb)
lis = ["평균 평점","후기","판매가","원할인률","할인 전 가격", "할인률"]
for i in range(len(proj)):
for j in range(len(lis)):
if lis[j] in proj[i]:
proj[i] = proj[i].replace(lis[j], " ")
project_data.append(proj)
########################################## review section ########################################
#review object
review_obj = []
reviews_user = []
reviews_rating = []
reviews_heading = []
reviews_text = []
rdiv = soup.find_all('div', class_ = "listSection--kViCl")
for eachr in rdiv:
ee = eachr.find_all('div', class_ = "reviewItem--1OwNO")
for each in ee:
name = each.find_all('span', class_ = ["item--3sQA9 ","nickname--2OOe6"])
for nm in name:
reviews_user.append(nm.text)
rating = each.find_all('div', class_ = ["expertPoint--2Zrvr","expertPoint--13H3V"])
for r in rating:
reviews_rating.append(r.text)
head = each.find_all('div', class_ = "reviewTitle--qv3Pk")
for r in head:
reviews_heading.append(r.text)
commentdiv = each.find_all('p', class_ = "reviewText--28mzN")
for ecom in commentdiv:
reviews_text.append(ecom.text)
for i in range(len(reviews_user)):
try:
review_obj.append(reviews_user[i])
if "평점" in reviews_rating[i]:
rating = reviews_rating[i].replace("평점", "")
review_obj.append(rating)
else:
review_obj.append(reviews_rating)
review_obj.append(reviews_heading[i])
review_obj.append(reviews_text[i])
except:
continue
review_all.append(review_obj)
######################################### driver close section ########################################
driver.quit()
######################################### Final dataframe section ########################################
except:
continue
try:
#Url dataframe section
urldf = pd.DataFrame(urls)
urldf.columns = ["Url"]
#basic dataframe section
basicdf = pd.DataFrame(basic_data)
basicdf.columns = ["Name","subcategory","Specialty","Review_score","Review_count","Consultations"]
#main dataframe section
maindf = pd.DataFrame(main_data)
maindf.columns =["Representative", "Company_name", "Business_registration_number", "Mail_order_number", "Business_address", "Customer_Center",'Mail']
#product review dataframe section
prdf = pd.DataFrame(product_review)
prdf.columns =["Class_Count", "Total_User_Reviews"]
# # profile dataframe section
profiledf = pd.DataFrame(profile_all)
profiledf.columns =["Profile", "Details", "Firm", "Education/Career"]
#projects dataframe section
projdf = pd.DataFrame(project_data)
reviewdf = | pd.DataFrame(review_all) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/10_visual.ipynb (unless otherwise specified).
__all__ = ['change_xaxis_pos', 'BaseViz', 'Missing', 'Correlation', 'plot_barh_from_df', 'plot_barh_from_series',
'Histogram', 'cal_histogram', 'plot_hist', 'BoxnWhisker', 'plot_boxnwhisker', 'KernelDensityEstimation',
'plot_kde', 'plot_roc_curve', 'plot_line', 'plot_bisectrix', 'plot_waterfall', 'plot_SKTree', 'plot_LGBTree',
'plot_scatter', 'plot_legend']
# Cell
from .utils import *
import numpy as np
import pandas as pd
import pdb
from .pre_processing import *
from .dataset import *
from .learner import *
import matplotlib.pyplot as plt
import seaborn as sns
import waterfall_chart
import graphviz
from sklearn.tree import export_graphviz
import IPython
import re
# Cell
def change_xaxis_pos(top):
if top:
plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = False
plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = True
else:
plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = True
plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = False
# Cell
class BaseViz:
    def __init__(self, data=None, *karg, **kargs):
        self.data = data
@classmethod
def from_learner(cls, *karg, **kargs): pass
@classmethod
def from_df(cls, *karg, **kargs):
data = cls.calculate(*karg, **kargs)
return cls(data)
@classmethod
def from_series(cls, *karg, **kargs): pass
@staticmethod
def calculate(*karg, **kargs): pass
def plot(self, *args, **kagrs): pass
# Cell
class Missing(BaseViz):
@staticmethod
def calculate(df):
df_miss = df.isnull().sum()/len(df)*100
df_miss = df_miss[df_miss > 0]
if df_miss.shape[0] == 0: print('no missing data'); return None
else:
data = pd.DataFrame({'feature':df_miss.index, 'missing percent':df_miss.values})
            return ResultDF(data, 'missing percent')
def plot(self): return plot_barh_from_df(self.data())
# Cell
class Correlation(BaseViz):
@staticmethod
def calculate(df, target):
correlations = df.corr()[target]
corr_df = pd.DataFrame({'feature': correlations.index, 'corr':correlations.values})
corr_df['neg'] = corr_df['corr'] < 0
corr_df['corr'] = abs(corr_df['corr'])
        corr_df = corr_df[corr_df['feature'] != target]
return ResultDF(corr_df, 'corr')
def plot(self): return plot_barh_from_df(self.data())
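# Illustrative usage sketch (assumes a DataFrame `df` with a numeric 'target' column;
# both names are placeholders, not objects defined in this module):
#   Missing.from_df(df).plot()
#   Correlation.from_df(df, 'target').plot()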
# Cell
def plot_barh_from_df(df, width = 20, height_ratio = 4):
change_xaxis_pos(True)
sort_asc(df).plot(x = df.columns[0],
kind='barh',
figsize=(width, df.shape[0]//height_ratio),
legend=False)
change_xaxis_pos(False)
# Cell
def plot_barh_from_series(features, series, figsize = None, absolute = False, pos_color = 'g', neg_color = 'r'):
if figsize is not None: plt.figure(figsize=figsize)
if type(series) == list: series = np.array(series)
change_xaxis_pos(True)
if not absolute:
argsort = np.argsort(series)
barh = plt.barh([features[s] for s in argsort], series[argsort],color=pos_color)
mask = series[argsort]<0
else:
series_absolute = np.abs(series)
argsort = np.argsort(series_absolute)
mask = series[argsort]<0
barh = plt.barh([features[s] for s in argsort], series_absolute[argsort], color=pos_color)
for i,m in enumerate(mask):
if m: barh[i].set_color(neg_color)
change_xaxis_pos(False)
# Cell
class Histogram(BaseViz):
def __init__(self, data, plot_df, bins):
        super().__init__(data)
self.plot_df, self.bins = plot_df, bins
@classmethod
def from_df(cls, df, cols = None, bins=20):
plot_df = df.copy() if cols is None else df[to_iter(cols)]
data = cls.calculate(plot_df, bins)
        return cls(data, plot_df, bins)
@staticmethod
def calculate(df, bins):
result = | pd.DataFrame(columns=['feature', 'division', 'count']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 09:50:28 2017
@author: smullally
"""
import sys
import os
import time
import re
import json
import mastAPITools as api
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
from astropy.table import Table
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=4)
import pandas as p
#%%
#I want to try to get a particular Kepler Id through the mastQuery API
#This does not require a cone search, only knowledge of the KIC ID.
kicid='011904151' #Kepler 10
#Step 0 get ra and dec for the kepler ID of interest
#Step one should be to ask if MAST has any data I want
#Step two should be to download the data (i.e. put it in the basket and retrieve)
#Step 0 -- get RA and Dec
objectOfInterest = 'KIC %s' % kicid
resolverRequest = {'service':'Mast.Name.Lookup',
'params':{'input':objectOfInterest,
'format':'json'},
}
headers,resolvedObjectString = api.mastQuery(resolverRequest)
resolvedObject = json.loads(resolvedObjectString)
print("Information about KIC Object")
pp.pprint(resolvedObject)
objRa = resolvedObject['resolvedCoordinate'][0]['ra']
objDec = resolvedObject['resolvedCoordinate'][0]['decl']
#Step 1
#Ask for data products within a cone search of that RA and Dec
coneradius_arcsec = 5
mastRequest = {'service':'Mast.Caom.Cone',
'params':{'ra':objRa,
'dec':objDec,
'radius':coneradius_arcsec/60},
'format':'json',
'pagesize':2000,
'page':1,
'removenullcolumns':True,
'removecache':True}
headers,mastDataString = api.mastQuery(mastRequest)
mastData = json.loads(mastDataString)
pp.pprint(mastData['fields'][:25])
#Limit that search to Kepler data with the original object ID.
#%%
#Limit that data to those with the right target_name and instrument_name
print(mastData.keys())
print("Query status:",mastData['status'])
#Convert the data to pandas dataframe
dfData=p.DataFrame.from_dict(mastData['data'])
print(dfData[:3])
#Create a dataframe of just those I want
wantdata=(dfData['target_name'] == 'kplr' + kicid) & (dfData['instrument_name']=='Kepler')
lcwant=(dfData['t_exptime'] == 1800)
scwant=(dfData['t_exptime'] == 60)
getdata=dfData[wantdata & lcwant]
obsid = int(dfData[wantdata & lcwant]['obsid'])
#Request The Products for this observation
productRequest = {'service':'Mast.Caom.Products',
'params':{'obsid':obsid},
'format':'json',
'pagesize':100,
'page':1}
headers,obsProductsString = api.mastQuery(productRequest)
obsProducts = json.loads(obsProductsString)
print("Number of data products:",len(obsProducts["data"]))
print("Product information column names:")
pp.pprint(obsProducts['fields'])
dfProd = | p.DataFrame.from_dict(obsProducts["data"]) | pandas.DataFrame.from_dict |
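#Step 2 (illustrative sketch, not part of the original script): download one of the
#listed products. The direct-download endpoint and the 'dataURI'/'productFilename'
#fields follow the public MAST API examples and should be treated as assumptions here.
def download_first_product(df_products, out_dir='mastFiles'):
    os.makedirs(out_dir, exist_ok=True)
    row = df_products.iloc[0]
    url = 'https://mast.stsci.edu/api/v0.1/Download/file?uri=' + row['dataURI']
    local_path = os.path.join(out_dir, row['productFilename'])
    urlretrieve(url, local_path)
    return local_path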
"""
Copyright 2021 Merck & Co., Inc. Kenilworth, NJ, USA.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import requests
import json
import pandas as pd
import numpy as np
from scipy.stats import variation, kurtosis, skew
from termcolor import colored
from datetime import datetime
from pytz import timezone
import sys
import os
from urllib.parse import quote_plus
def element_fmt(input):
if (str(input).find("/") > 0):
return quote_plus(str(input))
else:
return str(input)
def validate(value, possible_values, value_name):
if value == "" or value is None:
dataset_error = "Dataset not specified"
print(colored(
"ERROR! " + value_name + " name [" + str(value) + "] not specified", "red"))
return False
elif value not in possible_values:
dataset_error = "Dataset name not valid"
print(colored(
"ERROR! " + value_name + " name [" + str(value) + "] not valid", "red"))
return False
return True
def requestResponsePrint(response, total_run_time, verbose):
if str(response) == "<Response [200]>":
if verbose:
print(colored(
"\nDownload successful! Request completed in " + str(total_run_time), "green"))
elif str(response) == "<Response [401]>":
print(colored( "\nERROR! Unauthorized. Your credentials are either invalid or expired.",
"red"))
elif str(response) == "<Response [404]>":
print(colored("\nERROR! You don't have permission to access the resource you're \
trying to. If you believe this is in error, please contact the \
Data Profiler Team.",
"red"))
    elif str(response) == "<Response [403]>":
print(colored("\nERROR! The request had an error due to programming errors or \
cluster component downtime. Please try again, and contact the \
Data Profiler Team if the problem persists.",
"red"))
def map_listtodict(listdict):
'''
Takes a list of dictionaries and converts to dictionary
[{'value': 'val1', 'count': 23},{'value': 'val2', 'count': 2}, ..]
-> {'val1': 23, 'val2': 2}
Parameters:
        listdict (list): list of dictionaries with keys as value and count only
Returns:
dictionary: dictionary with keys as value and value as count
'''
valcnt_dict = {}
for valcnt in listdict:
valcnt_dict[valcnt['value']] = valcnt['count']
return valcnt_dict
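# Example (hypothetical values):
#   map_listtodict([{'value': 'A', 'count': 3}, {'value': 'B', 'count': 1}])
#   -> {'A': 3, 'B': 1}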
class Column():
def __init__(self, environment, dataset_name, table_name, column_name, filters={}):
self.column_name = column_name
self.table_name = table_name
self.dataset_name = dataset_name
self.env = environment
self.filters = filters
validated = self.validateData()
self.metadata = self.getColumnMetadata()
if self.filters != {}:
validate_filter = self.validateFilters()
if validate_filter==False:
print (colored("ERROR: this is not valid input", "red"))
if validated==False:
print (colored("ERROR: this is not valid input", "red"))
##### data about the column itself #####
def getColumnMetadata(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return response.json()[self.column_name]
##### setting filters for listing columns counts #####
def setFilters(self, filters):
self.filters = filters
self.validateData()
##### retrieving data stored within the column metadata #####
def getColumnDataType(self):
return self.metadata['data_type']
def getValueCount(self):
return self.metadata['num_values']
def getUniqueValueCount(self):
return self.metadata['num_unique_values']
def getVisibility(self):
return self.metadata['visibility']
def getUserAccessList(self):
url = self.env.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+self.getVisibility()+"\"){username}}"
})
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request,\
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
##### lists a dictionary of column counts with the structures as follows #####
##### [{'value':'value1', 'count':'count1'},...] #####
def listColumnCounts(self):
        ## filters do not work for this endpoint
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"column": self.column_name,
"limit": 0,
"sort": "CNT_DESC"
})
url = self.env.url + '/v1/colCounts'
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request," + \
" please contact the Data Profiler team", "red"))
return None
try:
text_data = response.text
text_data = text_data[:-1]
json_data = json.loads(text_data)
return json_data
except:
print (colored(f"There was a {response.status_code}" +
" error processing your request", "red"))
return None
## get a dictionary of the listed column values and their
## counts that are within the provided range of values
## returns empty list if errors or no values exist
## returns list of dicts: [{"value": "val_1", "count":"count_1"},
## {"value": "val_2", "count":"count_2"}]
def getColumnValuesInRange(self, min_val, max_val):
try:
range_len = float(max_val) - float(min_val)
except:
print (colored("Range values must be numbers", "red"))
return []
if float(max_val) <= float(min_val):
print (colored("Max range value must be greater than the min", "red"))
return []
all_value_count = self.listColumnCounts()
values_in_range = []
for value in all_value_count:
try:
if (float(value["value"]) >= float(min_val) and \
float(value["value"]) < float(max_val)):
values_in_range.append(value)
except:
continue
return values_in_range
def __isint(self, ignore={np.nan, '', ' ', '-', None}, threshold='0.70'):
conversion = {'integer','long'}
dt = self.getColumnDataType()
cnt = 0
icnt = 0
if dt == 'string':
all_value_count = self.listColumnCounts()
for valdict in all_value_count:
if valdict['value'] not in ignore:
cnt += valdict['count']
try :
int(valdict['value'])
icnt += valdict['count']
except:
pass
try:
if icnt/cnt >= float(threshold):
return True
else:
return False
except:
print (colored("Range values must be numbers", "red"))
return None
else:
if dt in conversion:
return True
else:
return False
def __isfloat(self, ignore={np.nan, '', ' ', '-', None}, threshold='0.70'):
conversion = {'integer','float','long'}
dt = self.getColumnDataType()
cnt = 0
fcnt = 0
if dt == 'string':
all_value_count = self.listColumnCounts()
for valdict in all_value_count:
if valdict['value'] not in ignore:
cnt += valdict['count']
try :
float(valdict['value'])
fcnt += valdict['count']
except:
pass
try:
if fcnt/cnt >= float(threshold):
return True
else:
return False
except:
print (colored("Range values must be numbers", "red"))
return None
else:
if dt in conversion:
return True
else:
return False
def __getdatatype(self):
        if self.__isint():
            return int
        elif self.__isfloat():
return float
elif self.getColumnDataType() == 'string':
return str
else:
return self.getColumnDataType()
##### Lists of valid datasets, tables, and columns #####
def validDatasets(self):
return self.env.getDatasetList()
def validTables(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
return list(response.json().keys())
def validColumns(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return list(response.json().keys())
##### validates the dataset, table, and column specified on initialization #####
def validateData(self):
valid_datasets = self.validDatasets()
dataset_valid = validate(self.dataset_name, valid_datasets, "Dataset")
if dataset_valid:
valid_tables = self.validTables()
table_valid = validate(self.table_name, valid_tables, "Table")
if table_valid:
valid_columns = self.validColumns()
                column_valid = validate(self.column_name, valid_columns, "Column")
return dataset_valid & table_valid & column_valid
##### validates the filters the user can choose to set #####
def validateFilters(self):
if self.filters != {}:
filter_keys = [x for x in self.filters]
for key in filter_keys:
valid_filter = validate(key, self.validColumns(), "Filter Column")
if valid_filter==False:
return False
return True
# Check for number of missing/blank values in the column
def __getNAscount(self,blank_types = {'',' ','-',None, np.nan}):
'''
Find missing values present in selected column
Parameters:
blank_types (set): what constitutes missing values
Returns:
int: Returns the number of missing values present
'''
ValCount = self.listColumnCounts()
cnt_all = 0
for vc in ValCount:
if vc['value'] in blank_types:
cnt_all += vc['count']
return cnt_all
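# --- Illustrative usage sketch (not part of the original module) --------------
# A hypothetical example of how the Column class above might be used. The URL,
# API key, and dataset/table/column names are placeholders, not real resources.
def _example_column_usage():
    env = Environment(api_key="<API_KEY>", url="https://dataprofiler.example.com/api")
    col = Column(env, "example_dataset", "example_table", "example_column")
    print(col.getColumnDataType(), col.getValueCount(), col.getUniqueValueCount())
    top_values = col.listColumnCounts()            # [{'value': ..., 'count': ...}, ...]
    in_range = col.getColumnValuesInRange(0, 100)  # numeric-castable values in [0, 100)
    return top_values, in_range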
class Table():
def __init__(self, environment, dataset_name, table_name, filters={}):
self.table_name = table_name
self.dataset_name = dataset_name
self.env = environment
self.filters = filters
validated = self.validateData()
if validated==False:
print (colored("ERROR: The input data is not valid", "red"))
self.table_info = self.getTableInfo()
self.metadata = self.getTableMetadata()
if self.filters != {}:
            validated_filters = self.validateFilters()
if validated_filters==False:
print (colored("ERROR: The input data is not valid", "red"))
#### get specific information about the inside of the table #####
def getTableInfo(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return response.json()
##### get metadata about the table #####
def getTableMetadata(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
return response.json()[self.table_name]
##### set filters for loading table rows #####
def setFilters(self, filters):
self.filters = filters
self.validateFilters()
##### get functions to access the information in the table #####
def getColumnList(self):
return list(self.table_info.keys())
def getColumnCount(self):
return len(self.getColumnList())
##### get functions to access the table metadata #####
def getUploadDate(self):
epoch_time = float(self.metadata['load_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getUpdateDate(self):
epoch_time = float(self.metadata['update_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getVisibility(self):
return self.metadata["visibility"]
def getTableCount(self):
return self.metadata["num_tables"]
def getColumnCount(self):
return self.metadata["num_columns"]
def getValueCount(self):
return self.metadata["num_values"]
def getPullTimestamp(self):
epoch_time = float(self.metadata["timestamp"])/1000
return datetime.fromtimestamp(epoch_time)
def getUserAccessList(self):
url = self.env.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+self.getVisibility()+"\"){username}}"
})
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request, \
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
##### format data for post requests #####
## If no sample size is given, then the limit is set to 0 which returns all rows
def getPostData(self, sample_size=0):
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"sort": "CNT_DESC",
"filters": self.filters,
"limit": sample_size
})
return post_data
    ##### format data for paged row post requests #####
    ## page_size controls how many rows come back per page and must be less than 10,000
def getRowsPostData(self, start_location=None, page_size = 5000):
if page_size >= 10000:
raise ValueError("Rows Page Size must be less than 10,000")
post_data = {
"dataset": self.dataset_name,
"table": self.table_name,
"filters": self.filters,
"limit": page_size,
"start_location": start_location,
"pageSize": page_size
}
# Remove None values (likely start_location)
return json.dumps({k: v for k, v in post_data.items() if v})
##### load the rows from the table #####
def loadRows(self):
if self.filters == {}:
print("\nDownloading ALL ROWS: {}, {}...".format(self.dataset_name, self.table_name))
else:
print("\nDownloading FILTERED TABLE: {} | {} \nFilter(s) \
applied: {}...".format(self.dataset_name, self.table_name, self.filters))
try:
url = self.env.url + '/v2/rows'
# Time post request
start_time = datetime.now(timezone('US/Eastern'))
# Tracking variables
start_location = None
results = []
# Run the rows endpoint through until we break
while True:
post_data = self.getRowsPostData(start_location)
response = requests.post(
url, headers=self.env.header, data=post_data).json()
results.extend(response['rows'])
# If endLocation is null/None, we have successfully gotten all the rows
# This is also where you'd want to check against the limit (if len(results) > limit)
if response['endLocation'] is None:
break
# Update the start location and loop again
start_location = response['endLocation']
total_run_time = str(datetime.now(
timezone('US/Eastern')) - start_time)
requestResponsePrint(response, total_run_time, self.env.verbose)
if len(results) == 0:
if self.env.verbose:
print(colored("Data request empty!", "red"))
## returns an empty dataframe with the column structure of the table itself
df = pd.DataFrame(columns=self.getColumnList())
else:
df = pd.DataFrame(results)
return df
except ValueError: # includes simplejson.decoder.JSONDecodeError
print(
colored("\nError - check response message or text to json parser.", "red"))
return None
## loads a subset of the table rows
## it will default to 100 rows, but a user can specfiy a number of rows
def loadTableSample(self, sample_size=100):
print((colored("Note: This endpoint is experimental - expect longer load times \
for larger datasets", "cyan")))
if self.filters == {}:
print("\nDownloading {} ROWS: {}, {}...".format(sample_size, self.dataset_name,
self.table_name))
else:
print("\nDownloading {} FILTERED ROWS: {} | {} \nFilter(s) \
applied: {} ...".format(sample_size, self.dataset_name, self.table_name,
self.filters))
try:
url = self.env.url + '/v1/rows'
post_data = self.getPostData(sample_size)
# Time post request
start_time = datetime.now(timezone('US/Eastern'))
response = requests.post(
url, headers=self.env.header, data=post_data)
total_run_time = str(datetime.now(
timezone('US/Eastern')) - start_time)
requestResponsePrint(response, total_run_time, self.env.verbose)
# Convert to text to remove extra "$" character
response.encoding = 'utf-8'
text_data = response.text
text_data = text_data[:-1]
if len(text_data) < 2:
if self.env.verbose:
print(colored("Data request empty!", "red"))
## returns an empty dataframe with the column structure of the table itself
df = pd.DataFrame(columns=self.getColumnList())
else:
json_data = json.loads(text_data)
df = pd.DataFrame(json_data)
return df
except ValueError: # includes simplejson.decoder.JSONDecodeError
print(
colored("\nError - check response message or text to json parser.", "red"))
return None
## get match locations for a given substring in a given column
## returns: list of match locations
def getSubstringValueMatches(self, substring, column):
## data for substring post request
post_data = json.dumps({
"begins_with": False,
"substring_match": True,
"term": [substring],
"dataset": self.dataset_name,
"table": self.table_name,
"column": column
})
## call endpoint to retrieve location responses
value_search_endpoint = self.env.url + '/search'
response = requests.post(value_search_endpoint, headers=self.env.header, data=post_data)
response_str = response.text
try:
response_list = json.loads(response_str)
return response_list
except:
print (colored("There were no value results for your query", "red"))
return []
## loads all the rows in a given table that match the provided substring filters
## these filters are structured like {'column':['subtring1',...],...} just
## like the ordinary filters
def loadRowsWithSubstringMatches(self, substring_filters):
filters = {}
for col in substring_filters:
vals = []
for filt in substring_filters[col]:
response_list = self.getSubstringValueMatches(filt, col)
for r in response_list:
vals.append(r["value"])
filters[col] = vals
self.setFilters(filters)
df = self.loadRows()
return df
## This loads all the rows within a range of min and max
## values for the designated column
def loadRowsInRange(self, min_val, max_val, column):
## get all unique values
def listColumnCounts(column):
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"column": column,
"limit": 0,
"sort": "CNT_DESC"
})
url = self.env.url + '/v1/colCounts'
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request," + \
" please contact the Data Profiler team", "red"))
return None
try:
text_data = response.text
text_data = text_data[:-1]
json_data = json.loads(text_data)
return json_data
except:
print (colored(f"There was a {response.status_code}" +
" error processing your request", "red"))
return None
if self.validateRangeInput(min_val, max_val, column):
value_count_df = pd.DataFrame(listColumnCounts(column))
value_list = list(value_count_df["value"])
## compare to min and max to get all unique values in range
min_float = float(min_val)
max_float = float(max_val)
values_in_range = []
for val in value_list:
try:
if float(val) >= min_float and float(val) < max_float:
values_in_range.append(val)
except:
continue
## set as the filter to load rows
filters = {column: values_in_range}
self.setFilters(filters)
## return results
match_df = self.loadRows()
return match_df
else:
return None
## This allows for the user to load all of the rows of a dataset
## except for those that match the exception filter
## exception filter structure: {"column": ["value_to_be_excluded"]}
def loadRowsWithException(self, exception_filter):
orig_filters = self.filters
## get all unique values for that column
def listColumnCounts(column):
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"column": column,
"limit": 0,
"sort": "CNT_DESC"
})
url = self.env.url + '/v1/colCounts'
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request," + \
" please contact the Data Profiler team", "red"))
return None
try:
text_data = response.text
text_data = text_data[:-1]
json_data = json.loads(text_data)
return json_data
except:
print (colored(f"There was a {response.status_code}" +
" error processing your request", "red"))
return None
## creates the filter for all values that should be included
filters = {}
for col in exception_filter:
vals = exception_filter[col]
value_count_df = pd.DataFrame(listColumnCounts(col))
value_list = list(value_count_df.loc[~value_count_df["value"].isin(vals)]["value"])
filters[col] = value_list
## sets filters and retrieves the data
if self.env.verbose:
print (colored("New filters are being set just for this operation", "cyan"))
self.setFilters(filters)
df = self.loadRows()
self.setFilters(orig_filters)
return df
# Check for number of missing/blank values in the table
def __getNAscount(self, blank_types={'',' ','-',None,np.nan}):
'''
Find missing values present in columns of selected table
Parameters:
blank_types (set): what constitutes missing values for selected table
Returns:
dictionary: Returns dictionary of missing values present across all columns
'''
df = self.loadRows()
blank_vals = dict( df.apply(lambda x: x[x.isin(blank_types)].shape[0],0) )
return blank_vals
# Get quantile values of numeric columns
    def __getQuantiles(self, qvals={'0','.25','.5','.75','1'}, columns=None):
'''
Get quantile values of numeric columns from selected or all columns
Parameters:
qvals (set): list of values between 0 <= q <= 1, the quantile(s) to compute
columns (set): list of columns to compute the quantiles for
Returns:
dictionary: dictionary of all the quantile values. Raises KeyError
if selected column doesn't exist in table. Returns None in case of Error or
datatype of selected column is not numeric
ex: quantiles['column1'] = { 0 : val1 , 0.25 : val2 , ..}
'''
        df = self.loadRows()
        Tableinfo = self.getTableInfo()
        # Copy into a local set so a shared default argument is never mutated
        columns = set(columns) if columns else set()
        if len(columns) != 0:
for column in columns:
try:
if Tableinfo[column]['data_type'] not in {'integer','long','float','double'}:
print(colored("\nError - Select columns with data type as" \
"integer, float, double or long.", "red"))
return None
except KeyError:
print(colored("\nError - Column {} doesn't exist \
in table.".format(column),"red"))
return None
else:
quantiles = {}
for column in Tableinfo:
#check for acceptable data types
if Tableinfo[column]['data_type'] in {'integer','long','float','double'}:
columns.add(column)
if len(columns) == 0:
print('The table doesn\'t contain columns with float or integer data type')
return None
else:
columns = list(columns)
try:
qvals = list(float(q) for q in qvals)
except:
print(colored("quantile values must be numbers", "red"))
df[columns] = df[columns].apply( lambda x: pd.to_numeric(x,errors = 'coerce'),\
axis = 0)
quantiles = {}
for column in columns:
quantiles[column] = dict(zip(qvals,
list(np.quantile(df[column],qvals))))
return quantiles
# Descriptive Stats for numeric columns
    def __getDescriptiveStats(self, columns=None, decimals=2):
'''
        Get descriptive statistics like mean, median, mode, standard deviation,
        sum, mean absolute deviation, coefficient of variation, kurtosis, and
        skewness of numeric columns from selected or all columns
Parameters:
columns (set): list of columns to compute the quantiles for
decimals (int): Number of decimal places to round each column to
Returns:
dictionary: dictionary of all the descriptive statistics
of all the relevant columns. Raises KeyError
if selected column doesn't exist in table. Returns None in case of Error or
datatype of selected column is not numeric
ex: Dstats['column1'] = { 'stat1': val1 , 'stat2': val2 }
'''
        df = self.loadRows()
        Tableinfo = self.getTableInfo()
        # Copy into a local set so a shared default argument is never mutated
        columns = set(columns) if columns else set()
        if columns != set():
for column in columns:
try:
if Tableinfo[column]['data_type'] not in {'integer',
'long', 'float', 'double'}:
print(colored("\nError - Select columns with data type as" /
" integer, float, double or long.", "red"))
return None
except KeyError:
print(colored("\nError - Column {} doesn't exist \
in table.".format(column),"red"))
return None
else:
for column in Tableinfo:
if Tableinfo[column]['data_type'] in {'integer','long','float','double'}:
columns.add(column)
if len(columns) == 0:
print('The table doesn\'t contain columns with numeric data type')
return None
else:
columns = list(columns)
df[columns] = df[columns].apply( lambda x: pd.to_numeric(x,errors = 'coerce'),
axis = 0)
Dstats = {}
for col in columns:
Dstats[col] = {}
Dstats[col]['mean'] = round(df[col].mean(skipna = True), decimals)
Dstats[col]['median'] = round(df[col].median(skipna = True), decimals)
Dstats[col]['mode'] = round(df[col].mode(dropna = True), decimals)
Dstats[col]['summation'] = round(df[col].sum(skipna = True), decimals)
Dstats[col]['standard_deviation'] = round(df[col].std(skipna = True),decimals)
            # Series.mad() was removed in pandas 2.0; compute the mean absolute deviation directly
            Dstats[col]['mean_absolute_deviation'] = round((df[col] - df[col].mean()).abs().mean(),
                                                            decimals)
Dstats[col]['variation'] = round(variation(df[col]), decimals)
Dstats[col]['kurtosis'] = round(kurtosis(df[col]), decimals)
Dstats[col]['skewness'] = round(skew(df[col]), decimals)
return Dstats
##### get list of valid datasets and tables for verification of input #####
def validDatasets(self):
valid_datasets = self.env.getDatasetList()
return valid_datasets
def validTables(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
valid_tables = list(response.json().keys())
return valid_tables
##### validate the dataset and table inputs #####
def validateData(self):
valid_datasets = self.validDatasets()
dataset_valid = validate(self.dataset_name, valid_datasets, "Dataset")
if dataset_valid:
valid_tables = self.validTables()
table_valid = validate(self.table_name, valid_tables, "Table")
return dataset_valid & table_valid
##### validate the columns chosen for filtering #####
def validateFilters(self):
valid_columns = self.getColumnList()
if self.filters != {}:
filter_keys = [x for x in self.filters]
for key in filter_keys:
valid_filter = validate(key, valid_columns, "Filter Column")
if valid_filter == False:
return False
return True
## validate that the inputs into the range search query are valid
## returns: validity boolean
def validateRangeInput(self, min_val, max_val, column):
try:
float(min_val)
float(max_val)
except:
print (colored("Min and max values must be integers or floats","red"))
return False
if column in self.getColumnList():
return True
else:
print (colored("Column must be valid column in table", "red"))
return False
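# --- Illustrative usage sketch (not part of the original module) --------------
# A hypothetical example of how the Table class above might be used. Names, URL,
# and filter values are placeholders.
def _example_table_usage():
    env = Environment(api_key="<API_KEY>", url="https://dataprofiler.example.com/api")
    tbl = Table(env, "example_dataset", "example_table")
    sample_df = tbl.loadTableSample(sample_size=50)    # quick peek at 50 rows
    tbl.setFilters({"state": ["NJ", "PA"]})            # exact-value filters per column
    filtered_df = tbl.loadRows()
    in_range_df = tbl.loadRowsInRange(0, 100, "age")   # numeric range filter on one column
    return sample_df, filtered_df, in_range_df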
class Dataset():
def __init__(self, environment, dataset_name):
self.dataset_name = dataset_name
self.env = environment
self.validateData()
self.dataset_info = self.getDatasetInfo()
self.metadata = self.getDatasetMetadata()
def getDatasetInfo(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
return response.json()
def getDatasetMetadata(self):
url = self.env.url + '/v1/datasets'
response = requests.get(url, headers=self.env.header)
return response.json()[self.dataset_name]
def getTableList(self):
return list(self.dataset_info.keys())
def getUploadDate(self):
epoch_time = float(self.metadata['load_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getUpdateDate(self):
epoch_time = float(self.metadata['update_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getVisibility(self):
return self.metadata["visibility"]
def getTableCount(self):
return self.metadata["num_tables"]
def getColumnCount(self):
return self.metadata["num_columns"]
def getValueCount(self):
return self.metadata["num_values"]
def getPullTimestamp(self):
epoch_time = float(self.metadata["timestamp"])/1000
return datetime.fromtimestamp(epoch_time)
def getUserAccessList(self):
url = self.env.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+self.getVisibility()+"\"){username}}"
})
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request, \
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
def getUploadCount(self):
url = self.env.url + '/jobs'
response = requests.get(url, headers=self.env.header)
if response.status_code == 200:
try:
count = 0
jobs = json.loads(response.text)
for upload in jobs:
if (jobs[upload]["datasetName"] == self.dataset_name and \
self.getVisibility() in jobs[upload]["visibilities"]):
count += 1
else:
continue
return count
except:
if self.env.verbose:
print (colored("Your request was empty", "red"))
else:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
def getAllUploadDates(self):
url = self.env.url + '/jobs'
response = requests.get(url, headers=self.env.header)
if response.status_code == 200:
try:
jobs = json.loads(response.text)
dates = []
for upload in jobs:
if (jobs[upload]["datasetName"] == self.dataset_name and \
self.getVisibility() in jobs[upload]["visibilities"]):
epoch_time = float(jobs[upload]["timestamp"])/1000
dates.append(datetime.fromtimestamp(epoch_time))
else:
continue
return dates
except:
if self.env.verbose:
print (colored("Your request was empty", "red"))
else:
print (colored("There was a {} error processing your\
request".format(response.status_code), "red"))
return None
def validateData(self):
valid_datasets = self.env.getDatasetList()
dataset_valid = validate(self.dataset_name, valid_datasets, "Dataset")
return dataset_valid
def loadTable(self, table_name):
if self.env.verbose:
print("\nDownloading ALL ROWS: ", self.dataset_name, ",", table_name, "...")
try:
url = self.env.url + '/v1/rows'
post_data = json.dumps({
"dataset": self.dataset_name,
"table": table_name,
"limit": 0,
"sort": "CNT_DESC"
})
# Time post request
start_time = datetime.now(timezone('US/Eastern'))
response = requests.post(
url, headers=self.env.header, data=post_data)
total_run_time = str(datetime.now(
timezone('US/Eastern')) - start_time)
requestResponsePrint(response, total_run_time, self.env.verbose)
# Convert to text to remove extra "$" character
response.encoding = 'utf-8'
text_data = response.text
text_data = text_data[:-1]
if len(text_data) < 2:
if self.env.verbose:
print(colored("Data request empty!", "red"))
                ## returns an empty dataframe (Dataset does not track a per-table
                ## column list, so no column structure is attached here)
                df = pd.DataFrame()
else:
json_data = json.loads(text_data)
df = pd.DataFrame(json_data)
return df
except ValueError: # includes simplejson.decoder.JSONDecodeError
print(
colored("\nError - check response message or text to json parser.", "red"))
return None
def importDataset(self):
if self.env.verbose:
print((colored("Note: This endpoint is experimental - expect longer load times for larger datasets", "cyan")))
tables = self.getTableList()
all_tables = {}
for table in tables:
all_tables[table] = self.loadTable(table)
return all_tables
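# --- Illustrative usage sketch (not part of the original module) --------------
# A hypothetical example of how the Dataset class above might be used. The URL,
# API key, and dataset name are placeholders.
def _example_dataset_usage():
    env = Environment(api_key="<API_KEY>", url="https://dataprofiler.example.com/api")
    ds = Dataset(env, "example_dataset")
    print(ds.getTableList(), ds.getTableCount(), ds.getUploadDate())
    first_table = ds.getTableList()[0]
    return ds.loadTable(first_table)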
class Environment():
def __init__(self, api_key, url, verbose=True):
self.api_key = api_key
self.url = url
self.header = {
"Content-Type": "application/json",
"X-Api-Key": self.api_key,
"Accept": "application/json"
}
self.verbose = verbose
self.env_info = self.getEnvironmentInfo()
##### gets all specific information about the contents of the environment#####
def getEnvironmentInfo(self):
url = self.url + '/v1/datasets'
try:
response = requests.get(url, headers=self.header)
return response.json()
except:
print (colored("ERROR: Could not connect to DP. Check that you are connected to the VPN before retrying", "red"))
return None
##### gets a list of all the datasets in the env available to the user #####
def getDatasetList(self):
return list(self.env_info.keys())
##### the number of datasets in the environment #####
def getDatasetCount(self):
return len(self.getDatasetList())
def getUserAccessList(self, visibility):
url = self.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+visibility+"\"){username}}"
})
response = requests.post(url, headers=self.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request,\
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
##### get a sample join between two tables on the two given columns #####
def getSampleJoin(self, dataset_a, table_a, column_a, dataset_b, table_b,
column_b, sample_size):
url = self.url + "/experimental/joinStats"
post_data = json.dumps({
"dataset_a":dataset_a,
"table_a":table_a,
"col_a":column_a,
"dataset_b":dataset_b,
"table_b":table_b,
"col_b":column_b,
"limit":sample_size
})
response = requests.post(url, headers=self.header, data=post_data)
if response.status_code == 200:
try:
json_data = json.loads(response.text)
return | pd.DataFrame(json_data["sample"]) | pandas.DataFrame |
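# Illustrative sketch (not part of the original module): a hypothetical join-stats
# request between two tables. The URL, key, and dataset/table/column names are
# placeholders.
#   env = Environment(api_key="<API_KEY>", url="https://dataprofiler.example.com/api")
#   join_sample = env.getSampleJoin("dataset_a", "table_a", "customer_id",
#                                   "dataset_b", "table_b", "customer_id",
#                                   sample_size=100)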
import os       # needed below to create the model output directory
import pickle   # needed below to save the pipeline objects
import pandas as pd
# Missing-value imputation
from sklearn.impute import SimpleImputer
# Dimensionality reduction (feature selection)
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
# one-hot encoding (categorical variables => ohe)
def one_hot_encoding(data, ohe_columns):
    X_ohe = pd.get_dummies(data,
                           dummy_na=True,       # dummy-encode missing values as well
                           columns=ohe_columns)
X_ohe_columns = X_ohe.columns.values
print('X_ohe shape:(%i,%i)' % X_ohe.shape)
return X_ohe, X_ohe_columns
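# Usage sketch (hypothetical DataFrame and column names):
#   X_ohe, X_ohe_columns = one_hot_encoding(X_raw, ohe_columns=['gender', 'region'])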
# Preprocessing for model data: handle missing values in numeric variables
def imputing_nan(X_ohe_for_training, X_ohe_apply_to):
    imp = SimpleImputer()        # defaults to mean imputation
    imp.fit(X_ohe_for_training)  # data the imputation statistics are computed from
X_ohe_columns = X_ohe_for_training.columns.values
X_ohe = pd.DataFrame(imp.transform(X_ohe_apply_to), columns=X_ohe_columns)
return X_ohe, imp
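# Usage sketch (variable names are placeholders): fit on the modeling data, then
# apply the same treatment to the scoring data.
#   X_ohe_model, imp = imputing_nan(X_ohe_model, X_ohe_model)
#   X_ohe_score, _   = imputing_nan(X_ohe_model, X_ohe_score)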
# Dimensionality reduction (recursive feature elimination)
def dimension_compression(X_ohe, y):
#selector = RFE(RandomForestClassifier(n_estimators=100, random_state=1),
    #                n_features_to_select=15, # number of features after reduction
# step=.05)
selector = RFECV(estimator=RandomForestClassifier(n_estimators=100,random_state=0), step=0.05)
selector.fit(X_ohe,y)
X_ohe_columns = X_ohe.columns.values
    # Process the training dataset
    # selector.support_ is a boolean mask (True/False per column)
X_fin = X_ohe.loc[:, X_ohe_columns[selector.support_]]
print('X_fin shape:(%i,%i)' % X_fin.shape)
return X_fin, selector
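# Usage sketch (X_ohe is the imputed one-hot matrix, y the target vector):
#   X_fin, selector = dimension_compression(X_ohe, y)
#   X_score_fin = X_ohe_score.loc[:, X_fin.columns]  # apply the same column mask to scoring data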
def fit_on_pipelines(pipelines, gs_params, X_train, X_test, y_train, y_test, evaluation_scoring):
    # Initialize the scores container (dict)
    scores = {}
    # The pipeline key (e.g. 'KNN') is bound to pipe_name and each pipeline
    # instance to pipeline in turn
for pipe_name, pipeline in pipelines.items():
print(pipe_name)
gs = GridSearchCV(estimator=pipeline,
param_grid = gs_params[pipe_name],
scoring=evaluation_scoring,
cv=5,
return_train_score=False)
        # Fit the grid search
gs.fit(X_train, y_train)
scores[(pipe_name,'train')] = accuracy_score(y_train, gs.predict(X_train))
scores[(pipe_name,'test')] = accuracy_score(y_test, gs.predict(X_test))
        # Create the directory where the models will be stored
os.makedirs('../models/pipeline_models', exist_ok=True)
        # Save each pipeline to the models folder
file_name = '../models/pipeline_models/'+pipe_name+'.pkl'
pickle.dump(pipeline, open(file_name, 'wb'))
return scores
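# Illustrative sketch (not part of the original module): the expected shape of the
# `pipelines` / `gs_params` arguments. Estimators, grids, and data are placeholders.
def _example_fit_on_pipelines(X_train, X_test, y_train, y_test):
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    pipelines = {
        'logistic': Pipeline([('scl', StandardScaler()),
                              ('est', LogisticRegression(random_state=1))]),
        'rf': Pipeline([('est', RandomForestClassifier(random_state=1))]),
    }
    gs_params = {
        'logistic': {'est__C': [0.1, 1.0, 10.0]},
        'rf': {'est__n_estimators': [100, 200]},
    }
    return fit_on_pipelines(pipelines, gs_params,
                            X_train, X_test, y_train, y_test,
                            evaluation_scoring='accuracy')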
def compare_train_test_data(X_ohe, X_ohe_s):
cols_model= set(X_ohe.columns.values)
cols_score = set(X_ohe_s.columns.values)
diff1 = cols_model - cols_score
    print('Columns present only in the model data: %s' % diff1)
diff2 = cols_score - cols_model
    print('Columns present only in the scoring data: %s' % diff2)
# Resolve data inconsistencies after one-hot encoding
# Variables that exist in the model data but not in the scoring data are restored
# Variables that exist in the scoring data but not in the model data are dropped
def resolution_for_inconsistent_data(X_ohe, X_ohe_s):
    dataset_cols_m = pd.DataFrame(None, # empty frame
                                  columns=X_ohe.columns.values,# column layout after one-hot encoding at modeling time
dtype=float)
X_ohe_s = | pd.concat([dataset_cols_m, X_ohe_s]) | pandas.concat |