prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import streamlit as st
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
st.write("""
# Iris Flower Prediction App
This app predicts the Iris flower type
""")
st.sidebar.header('User Input Parameters')
def user_input_features():
sepal_length = st.sidebar.slider('Sepal Length', 4.3,7.9,5.4)
sepal_width = st.sidebar.slider('Sepal Width', 2.0,4.4,3.4)
petal_length = st.sidebar.slider('Petal Length', 1.0,6.9,1.3)
petal_width = st.sidebar.slider('Petal Width', 0.1,2.5,0.2)
data = {
'sepal-length': sepal_length,
'sepal-width': sepal_width,
'petal-length': petal_length,
'petal-width': petal_width
}
features =
|
pd.DataFrame(data, index=[0])
|
pandas.DataFrame
|
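The completion above fills "features =" with a one-row DataFrame built from the slider values. A minimal, self-contained sketch of how such a frame feeds the classifiers imported in the prompt (the ensemble choice and column handling here are illustrative, not taken from this snippet):
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
iris = load_iris()
# One row of "user input", mirroring the dict built in user_input_features()
data = {'sepal-length': 5.4, 'sepal-width': 3.4, 'petal-length': 1.3, 'petal-width': 0.2}
features = pd.DataFrame(data, index=[0])          # shape (1, 4)
# Small voting ensemble fitted on raw arrays; .values sidesteps the mismatch
# between the slider column names and the iris feature names.
clf = VotingClassifier([('knn', KNeighborsClassifier()),
                        ('rf', RandomForestClassifier(random_state=0))],
                       voting='soft')
clf.fit(iris.data, iris.target)
print(iris.target_names[clf.predict(features.values)])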
"""This module is meant to contain the Solscan class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List, Dict
from .helpers import unpack_dataframe_of_dicts
import pandas as pd
#### Block
BLOCK_LAST_URL = 'https://public-api.solscan.io/block/last'
BLOCK_TRANSACTIONS_URL = 'https://public-api.solscan.io/block/transactions'
BLOCK_BLOCK_URL = Template('https://public-api.solscan.io/block/$block')
#### Transaction
TRANSACTION_LAST_URL = 'https://public-api.solscan.io/transaction/last'
TRANSACTION_SIGNATURE_URL = Template('https://public-api.solscan.io/transaction/$signature')
#### Account
ACCOUNT_TOKENS_URL = 'https://public-api.solscan.io/account/tokens'
ACCOUNT_TRANSACTIONS_URL = 'https://public-api.solscan.io/account/transactions'
ACCOUNT_STAKE_URL = 'https://public-api.solscan.io/account/stakeAccounts'
ACCOUNT_SPL_TXNS_URL = 'https://public-api.solscan.io/account/splTransfers'
ACCOUNT_SOL_TXNS_URL = 'https://public-api.solscan.io/account/solTransfers'
ACCOUNT_EXPORT_TXNS_URL = 'https://public-api.solscan.io/account/exportTransactions'
ACCOUNT_ACCOUNT_URL = Template('https://public-api.solscan.io/account/$account')
#### Token
TOKEN_HOLDERS_URL = 'https://public-api.solscan.io/token/holders'
TOKEN_META_URL = 'https://public-api.solscan.io/token/meta'
TOKEN_LIST_URL = 'https://public-api.solscan.io/token/list'
#### Market
MARKET_INFO_URL = Template('https://public-api.solscan.io/market/token/$tokenAddress')
#### Chain Information
CHAIN_INFO_URL = 'https://public-api.solscan.io/chaininfo'
# TODO: make this clean / not hardcoded? Look into how this works
HEADERS={'accept': 'application/json', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'} # pylint: disable=line-too-long
class Solscan(DataLoader):
"""This class is a wrapper around the Solscan API
"""
def __init__(self):
DataLoader.__init__(self, api_dict=None, taxonomy_dict=None)
#################
# Block endpoints
def get_last_blocks(self, num_blocks=1) -> pd.DataFrame:
"""returns info for last blocks (default is 1, limit is 20)
Parameters
----------
num_blocks: int (default is 1)
number of blocks to return, max is 20
Returns
-------
DataFrame
DataFrame with block information
"""
# Max value is 20 or API bricks
limit=num_blocks if num_blocks < 21 else 20
params = {'limit': limit}
last_blocks = self.get_response(BLOCK_LAST_URL,
params=params,
headers=HEADERS)
last_blocks_df = pd.DataFrame(last_blocks)
last_blocks_df.set_index('currentSlot', inplace=True)
last_blocks_df = unpack_dataframe_of_dicts(last_blocks_df)
# TODO, extract data from 'result'
return last_blocks_df
def get_block_last_transactions(self, blocks_in: Union[str, List],
offset=0, num_transactions=10) -> pd.DataFrame:
"""get last num_transactions of given block numbers
Parameters
----------
blocks_in: str, List
a single block or a list of blocks
num_transactions: int (default is 10)
number of transactions to return
Returns
-------
DataFrame
dataframe with transaction details
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
params = {'block': block,
'offset': offset,
'limit': num_transactions}
txns = self.get_response(BLOCK_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
txns_df = pd.DataFrame(txns)
df_list.append(txns_df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_block(self, blocks_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given block(s)
Parameters
----------
blocks_in: str, List
a single block or a list of blocks
Returns
-------
DataFrame
DataFrame with block information
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
endpoint_url = BLOCK_BLOCK_URL.substitute(block=block)
response = self.get_response(endpoint_url,
headers=HEADERS)
df = pd.DataFrame(response)
df = df.drop('currentSlot', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = fin_df.xs('result', axis=1, level=1)
return fin_df
#######################
# Transaction endpoints
def get_last_transactions(self, num_transactions=10) -> pd.DataFrame:
"""Return last num_transactions transactions
Parameters
----------
num_transactions: int (default is 10)
number of transactions to return, limit is 20
Returns
-------
DataFrame
dataframe with transaction details
"""
# Max value is 20
limit=num_transactions if num_transactions < 21 else 20
params = {'limit': limit}
response = self.get_response(TRANSACTION_LAST_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
fin_df = unpack_dataframe_of_dicts(df)
return fin_df
def get_transaction(self, signatures_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given transaction signature(s)
Parameters
----------
signatures_in: str, List
a single signature or a list of signatures
Returns
-------
DataFrame
DataFrame with transaction details
"""
signatures = validate_input(signatures_in)
series_list = []
for signature in signatures:
endpoint_url = TRANSACTION_SIGNATURE_URL.substitute(signature=signature)
response = self.get_response(endpoint_url,
headers=HEADERS)
#print(response)
series = pd.Series(response)
series_list.append(series)
fin_df =
|
pd.concat(series_list, keys=signatures, axis=1)
|
pandas.concat
|
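The completion pd.concat(series_list, keys=signatures, axis=1) turns one Series per API response into a DataFrame with one column per signature. A small offline sketch with made-up response dicts (not real Solscan payloads):
import pandas as pd
responses = {
    'sig1': {'blockTime': 1640000000, 'slot': 111, 'fee': 5000, 'status': 'Success'},
    'sig2': {'blockTime': 1640000060, 'slot': 112, 'fee': 5000, 'status': 'Success'},
}
series_list = [pd.Series(resp) for resp in responses.values()]
# keys= labels the columns with the transaction signatures
fin_df = pd.concat(series_list, keys=list(responses), axis=1)
print(fin_df)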
import os
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def get_splits(random_state = 42, data_folder = './data/', file_extension = '.jpeg'):
images_path = os.path.join(data_folder, 'images')
labels_path = os.path.join(data_folder, 'dermx_labels.csv')
labels = pd.read_csv(labels_path)
labels['path'] = labels['image_id'].apply(lambda x : os.path.join(images_path, f'{x}{file_extension}'))
labels['exists'] = labels['path'].apply(lambda x : os.path.exists(x)).astype(int)
#Drop the rows we don't have data on.
labels = labels.loc[labels['exists'] == 1]
le = LabelEncoder()
#To begin with, we just want the path and the diagnosis.
data = pd.DataFrame({'path' : labels['path'], 'target' : le.fit_transform(labels['diagnosis'])})
#Train is 70%, val is 15% and test is 15%.
train, val = train_test_split(data, test_size = 0.3, random_state = random_state)
val, test = train_test_split(val, test_size = 0.5, random_state = random_state)
return (train.path.tolist(), train.target.tolist()), (val.path.tolist(), val.target.tolist()), (test.path.tolist(), test.target.tolist()), le
def get_splits_characteristics(random_state = 42, data_folder = './data/', file_extension = '.jpeg'):
images_path = os.path.join(data_folder, 'images')
labels_path = os.path.join(data_folder, 'dermx_labels.csv')
labels =
|
pd.read_csv(labels_path)
|
pandas.read_csv
|
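The 70/15/15 split in get_splits() comes from chaining two calls to train_test_split. A toy illustration with synthetic rows (not the dermx CSV):
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
labels = pd.DataFrame({'image_id': [f'img{i}' for i in range(20)],
                       'diagnosis': ['acne', 'psoriasis'] * 10})
le = LabelEncoder()
data = pd.DataFrame({'path': labels['image_id'] + '.jpeg',
                     'target': le.fit_transform(labels['diagnosis'])})
# First split peels off 30% for val+test, the second halves that remainder.
train, val = train_test_split(data, test_size=0.3, random_state=42)
val, test = train_test_split(val, test_size=0.5, random_state=42)
print(len(train), len(val), len(test))   # 14 3 3, i.e. roughly 70/15/15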
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
exp =
|
pd.Series([False, True, False])
|
pandas.Series
|
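The tests above pin down isnull/notnull semantics for NaN, NaT and the datetime/timedelta/period containers; current pandas exposes the same behaviour publicly as pd.isna / pd.notna. A quick standalone check:
import numpy as np
import pandas as pd
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
print(pd.isna(idx))             # [False  True False]
print(pd.notna(idx))            # [ True False  True]
s = pd.Series([1.5, np.nan, 3.0])
print(pd.isna(s).tolist())      # [False, True, False]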
"""API definition for seaicetimeseries.
A package for accessing and manipulating sea ice timeseries data.
"""
import datetime as dt
import numpy as np
import pandas as pd
from . import access
from . import common as c
from . import warp
import seaice.nasateam as nt
def daily(hemisphere=None,
start_date=None, end_date=None,
data_store=nt.DAILY_DATA_STORE_FILENAME,
columns=nt.DAILY_DEFAULT_COLUMNS,
interpolate=-1, nday_average=0, min_valid=2,
preserve_nan=False, filter_failed_qa=True):
"""Return a Pandas dataframe of the data in data_store filtered by input parameters.
Keyword Arguments
-----------------
hemisphere: Select 'N' for northern or 'S' for southern to return only
data for desired hemisphere.
start_date: numpy datetime64 object whose value is the first day of
data to return. If 'None', return the first available date in the
data_store.
end_date: numpy datetime64 object whose value is the last day of data
to return. If 'None', then return everything up until the last day of
data in the data_store.
data_store: 'sedna' style backend file. Where the data are in CSV format
and columns are date, total_extent_km2, total_area_km2, missing_km2,
hemisphere, filename. The default value is this package's
DAILY_DATA_STORE_FILENAME.
columns: list of columns to extract and return from the datastore. By
default this list contains nt.DAILY_DEFAULT_COLUMNS which are the data
and metadata for the whole hemisphere. If this list is empty [], return
every column in the datastore.
interpolate: number of missing NaN values to fill in a column. Calls
Pandas.Series.interpolate using default 'linear' interpolation.
Interpolate will operate on all columns excluding NT.METADATA_COLUMNS and
return a copy of the original dataframe with each column's values
replaced with interpolated values. This operation will also drop any
missing data columns.
- value <= 0 No interpolation. (default)
- value > 0 Interpolate if only 'value' values are missing
(suggest: 1).
- None fill any missing regardless of number of consecutive
missing (not recommended).
nday_average: Number of days to use to compute a rolling mean. This number
is the width of the moving window, or the number of values used in
calculating the statistic. ASINA and other projects will use a 5 day
rolling mean in order to smooth data to have nicer looking graphs.
min_valid: If nday_average is non-zero, this is the number of days of
valid data required within the nday_average window in order to
compute a valid mean for the window.
preserve_nan : If nday_average is non-zero, and this flag is set to
True, np.nan will be returned for any values that were originally missing,
regardless of if an average could be calculated for them.
filter_failed_qa: Set all returned values to np.nan for rows with failed_qa
set as true in the data store. Defaults to True
"""
df = access._dataframe_from_data_store(data_store)
df = warp.collapse_hemisphere_index(df)
df = warp.filter_hemisphere(df, hemisphere)
df.index = df.index.to_timestamp()
if filter_failed_qa:
df = warp.filter_failed_qa(df)
df = warp.filter_columns(df, columns)
if interpolate is None or interpolate > 0:
df = warp.interpolate_df(df, interpolate)
df = warp.drop_missing_columns(df)
if nday_average > 0:
df = warp.nday_average(df, nday_average, min_valid, preserve_nan, False)
df = warp.filter_before(df, start_date)
df = warp.filter_after(df, end_date)
return df
def monthly(hemisphere=None,
start_date=None, end_date=None, month_num=None,
data_store=nt.MONTHLY_DATA_STORE_FILENAME,
columns=nt.MONTHLY_DEFAULT_COLUMNS):
"""Return a Pandas dataframe of the monthly data in data_store filtered by
the input parameters
Keyword Arguments
-----------------
hemisphere: Either 'N' for northern or 'S' for southern to return only
data for selected hemisphere.
start_date: Python datetime.date object whose value is the first day of
data to return. If not None, exclude date before this date from the
returned dataframe. Since the data_store data is monthly, consider
any date within a month to be part of that month. i.e. if the date
is entered as a datetime.date(2000, 3, 15), no data would be included
for Feb 2000 or earlier.
end_date: Python datetime.date object whose value is the last day of data
to return. If not None, then exclude any data after this date from
the returned dataframe.
month_num: Filter returned data to only this month. i.e. if you only
wanted to examine January data you would set this value to 1.
data_store: 'sedna' style backend file. Where the data are in CSV format
and columns are month, total_extent_km2, total_area_km2,
missing_km2, hemisphere, filename. The default value is this
package's MONTHLY_DATA_STORE_FILENAME.
columns: list of columns to extract and return from the datastore. By
default this list contains nt.MONTHLY_DEFAULT_COLUMNS which are the data
and metadata for the whole hemisphere. If this list is empty [], return
every column in the datastore.
"""
df = access._dataframe_from_data_store_monthly(data_store)
df = warp.collapse_hemisphere_index(df)
df = warp.filter_hemisphere(df, hemisphere)
df = warp.filter_columns(df, columns)
df = warp.filter_before(df, start_date)
df = warp.filter_after(df, end_date)
if month_num is not None:
df = df[df.index.month == month_num]
return df
def monthly_rates_of_change(hemisphere, data_store=nt.DAILY_DATA_STORE_FILENAME):
"""Return a Pandas dataframe of the data in data_store for the specified
hemisphere. Statistics related to monthly change are computed and included
in the returned DataFrame.
Keyword Arguments
-----------------
data_store: 'sedna' style backend file. Where the data are in CSV format and
columns are date, total_extent_km2, total_area_km2, missing_km2,
hemisphere, filename. The default value is this package's
DAILY_DATA_STORE_FILENAME.
"""
df = daily(hemisphere, data_store=data_store)
# don't include the current month in the rates of change calculation
first_of_this_month = dt.date.today().replace(day=1).strftime('%Y-%m-%d')
df = df[df.index < first_of_this_month]
df['extent'] = scale(df.total_extent_km2)
df = df[['extent', 'hemisphere']]
df['interpolated_extent'] = df.extent.interpolate(limit=1)
df['5 Day'] = nday_average(df['interpolated_extent'], num_days=5, min_valid=2)
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
a = df.groupby([df.index.year, df.index.month])
mismatch = a['interpolated_extent'].count() == a['day'].last()
a = df.groupby([df.index.year, df.index.month]).last()
a['ice change Mkm^2 per month'] = a['5 Day'].diff(periods=1)
# Set bad data
a.loc[mismatch == False, ['5 Day', 'ice change Mkm^2 per month']] = None # noqa
a['ice change km^2 per day'] = (a['ice change Mkm^2 per month'] / a['day']) * 1000000
a['ice change mi^2 per month'] = a['ice change Mkm^2 per month'] * c.KM2_TO_MI2 * 1000000
a['ice change mi^2 per day'] = a['ice change km^2 per day'] * c.KM2_TO_MI2
return a
def climatology_average_rates_of_change(hemisphere, data_store=nt.DAILY_DATA_STORE_FILENAME):
"""Return a Pandas dataframe of the data in data_store for the specified
hemisphere. The average rate of change for each month over climatological
range (1981-2010) is computed.
Keyword Arguments
-----------------
data_store: 'sedna' style backend file. Where the data are in CSV format and
columns are date, total_extent_km2, total_area_km2, missing_km2,
hemisphere, filename. The default value is this package's
DAILY_DATA_STORE_FILENAME.
"""
df = monthly_rates_of_change(hemisphere, data_store=data_store)
# use a DatetimeIndex instead of a year/month MultiIndex
df['date'] = df.apply(lambda x: pd.Timestamp(x.year, x.month, x.day), axis='columns')
df = df.set_index(['date'], drop=True)
df = warp.filter_before(df,
|
pd.Timestamp(c.DEFAULT_CLIMATOLOGY_DATES[0])
|
pandas.Timestamp
|
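The daily() docstring above describes gap interpolation and an nday rolling mean gated by min_valid. A hedged sketch of that smoothing idea in plain pandas (the real warp.nday_average helper may differ in detail):
import numpy as np
import pandas as pd
extent = pd.Series([12.1, 12.0, np.nan, np.nan, 11.7, 11.6, 11.5],
                   index=pd.date_range('2020-01-01', periods=7))
# fill at most one consecutive missing value, as in monthly_rates_of_change()
filled = extent.interpolate(limit=1)
# 5-day rolling mean that needs at least 2 valid days in the window,
# mirroring nday_average=5, min_valid=2
smoothed = extent.rolling(window=5, min_periods=2).mean()
# preserve_nan=True would put NaN back wherever the input was missing
preserved = smoothed.mask(extent.isna())
print(pd.DataFrame({'extent': extent, 'filled': filled,
                    '5 Day': smoothed, 'preserved': preserved}))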
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
class SqFtProFormaConfig(object):
"""
This class encapsulates the configuration options for the square
foot based pro forma.
parcel_sizes : list
A list of parcel sizes to test. Interestingly, right now
the parcel sizes cancel in this style of pro forma computation so
you can set this to something reasonable for debugging purposes -
e.g. [10000]. All sizes can be feet or meters as long as they are
consistently used.
fars : list
A list of floor area ratios to use. FAR is a multiple of
the parcel size that is the total building bulk that is allowed by
zoning on the site. In this case, all of these ratios will be
tested regardless of zoning and the zoning test will be performed
later.
uses : list
A list of space uses to use within a building. These are
mixed into forms. Generally speaking, you should only have uses
for which you have an estimate (or observed) values for rents in
the building. By default, uses are retail, industrial, office,
and residential.
forms : dict
A dictionary where keys are names for the form and values
are also dictionaries where keys are uses and values are the
proportion of that use used in this form. The values of the
dictionary should sum to 1.0. For instance, a form called
"residential" might have a dict of space allocations equal to
{"residential": 1.0} while a form called "mixedresidential"
might have a dictionary of space allocations equal to
{"retail": .1, "residential" .9] which is 90% residential and
10% retail.
parking_rates : dict
A dict of rates per thousand square feet where keys are the uses
from the list specified in the attribute above. The ratios
are typically in the range 0.5 - 3.0 or similar. So for
instance, a key-value pair of "retail": 2.0 would be two parking
spaces per 1,000 square feet of retail. This is a per square
foot pro forma, so the more typical parking ratio of spaces
per residential unit must be converted to square feet for use in
this pro forma.
sqft_per_rate : float
The number of square feet per unit for use in the
parking_rates above. By default this is set to 1,000 but can be
overridden.
parking_configs : list
An expert parameter and is usually unchanged. By default
it is set to ['surface', 'deck', 'underground'] and very semantic
differences in the computation are performed for each of these
parking configurations. Generally speaking it will break things
to change this array, but an item can be removed if that parking
configuration should not be tested.
parking_sqft_d : dict
A dictionary where keys are the three parking
configurations listed above and values are square foot uses of
parking spaces in that configuration. This is to capture the
fact that surface parking is usually more space intensive
than deck or underground parking.
parking_cost_d : dict
The parking cost for each parking configuration. Keys are the
name of the three parking configurations listed above and values
are dollars PER SQUARE FOOT for parking in that configuration.
Used to capture the fact that underground and deck are far more
expensive than surface parking.
heights_for_costs : list
A list of "break points" as heights at which construction becomes
more expensive. Generally these are the heights at which
construction materials change from wood, to concrete, to steel.
Costs are also given as lists by use for each of these break
points and are considered to be valid up to the break point. A
list would look something like [15, 55, 120, np.inf].
costs : dict
The keys are uses from the attribute above and the values are a
list of floating point numbers of same length as the
heights_for_costs attribute. A key-value pair of
"residential": [160.0, 175.0, 200.0, 230.0] would say that the
residential use is $160/sqft up to 15ft in total height for the
building, $175/sqft up to 55ft, $200/sqft up to 120ft, and
$230/sqft beyond. A final value in the heights_for_costs
array of np.inf is typical.
height_per_story : float
The per-story height for the building used to turn an
FAR into an actual height.
max_retail_height : float
The maximum height of retail buildings to consider.
max_industrial_height : float
The maximum height of industrial buildings to consider.
profit_factor : float
The ratio of profit a developer expects to make above the break
even rent. Should be greater than 1.0, e.g. a 10% profit would be
a profit factor of 1.1.
building_efficiency : float
The efficiency of the building. This turns total FAR into the
amount of space which gets a square foot rent. The entire building
gets the cost of course.
parcel_coverage : float
The ratio of the building footprint to the parcel size. Also used
to turn an FAR into a height to cost properly.
cap_rate : float
The rate an investor is willing to pay for a cash flow per year.
This means $1/year is equivalent to 1/cap_rate present dollars.
This is a macroeconomic input that is widely available on the
internet.
"""
def __init__(self):
self._reset_defaults()
def _reset_defaults(self):
self.parcel_sizes = [10000.0]
self.fars = [.1, .25, .5, .75, 1.0, 1.5, 1.8, 2.0, 2.25, 2.5, 2.75,
3.0, 3.25, 3.5, 3.75, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 9.0, 11.0]
self.uses = ['retail', 'industrial', 'office', 'residential']
self.residential_uses = [False, False, False, True]
self.forms = {
'retail': {
"retail": 1.0
},
'industrial': {
"industrial": 1.0
},
'office': {
"office": 1.0
},
'residential': {
"residential": 1.0
},
'mixedresidential': {
"retail": .1,
"residential": .9
},
'mixedoffice': {
"office": 0.7,
"residential": 0.3
}
}
self.profit_factor = 1.1
self.building_efficiency = .7
self.parcel_coverage = .8
self.cap_rate = .05
self.parking_rates = {
"retail": 2.0,
"industrial": .6,
"office": 1.0,
"residential": 1.0
}
self.sqft_per_rate = 1000.0
self.parking_configs = ['surface', 'deck', 'underground']
self.costs = {
"retail": [160.0, 175.0, 200.0, 230.0],
"industrial": [140.0, 175.0, 200.0, 230.0],
"office": [160.0, 175.0, 200.0, 230.0],
"residential": [170.0, 190.0, 210.0, 240.0]
}
self.heights_for_costs = [15, 55, 120, np.inf]
self.parking_sqft_d = {
'surface': 300.0,
'deck': 250.0,
'underground': 250.0
}
self.parking_cost_d = {
'surface': 30,
'deck': 90,
'underground': 110
}
self.height_per_story = 10.0
self.max_retail_height = 2.0
self.max_industrial_height = 2.0
def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.iteritems():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
@property
def tiled_parcel_sizes(self):
return np.reshape(np.repeat(self.parcel_sizes, self.fars.size), (-1, 1))
def check_is_reasonable(self):
fars = pd.Series(self.fars)
assert len(fars[fars > 20]) == 0
assert len(fars[fars <= 0]) == 0
for k, v in self.forms.iteritems():
assert isinstance(v, dict)
for k2, v2 in self.forms[k].iteritems():
assert isinstance(k2, str)
assert isinstance(v2, float)
for k2, v2 in self.forms[k].iteritems():
assert isinstance(k2, str)
assert isinstance(v2, float)
for k, v in self.parking_rates.iteritems():
assert isinstance(k, str)
assert k in self.uses
assert 0 <= v < 5
for k, v in self.parking_sqft_d.iteritems():
assert isinstance(k, str)
assert k in self.parking_configs
assert 50 <= v <= 1000
for k, v in self.parking_sqft_d.iteritems():
assert isinstance(k, str)
assert k in self.parking_cost_d
assert 10 <= v <= 300
for v in self.heights_for_costs:
assert isinstance(v, int) or isinstance(v, float)
if np.isinf(v):
continue
assert 0 <= v <= 1000
for k, v in self.costs.iteritems():
assert isinstance(k, str)
assert k in self.uses
for i in v:
assert 10 < i < 1000
class SqFtProForma(object):
"""
Initialize the square foot based pro forma.
This pro forma has no representation of units - it does not
differentiate between the rent attained by 1BR, 2BR, or 3BR and change
the rents accordingly. This is largely because it is difficult to get
information on the unit mix in an existing building in order to compute
its acquisition cost. Thus rents and costs per sqft are used for new
and current buildings which assumes there is a constant return on
increasing and decreasing unit sizes, an extremely useful simplifying
assumption above the project scale (i.e. city or regional scale)
Parameters
----------
config : `SqFtProFormaConfig`
The configuration object which should be an
instance of `SqFtProFormaConfig`. The configuration options for this
pro forma are documented on the configuration object.
"""
def __init__(self, config=None):
if config is None:
config = SqFtProFormaConfig()
config.check_is_reasonable()
self.config = config
self.config._convert_types()
self._generate_lookup()
def _building_cost(self, use_mix, stories):
"""
Generate building cost for a set of buildings
Parameters
----------
use_mix : array
The mix of uses for this form
stories : series
A Pandas Series of stories
Returns
-------
array
The cost per sqft for this unit mix and height.
"""
c = self.config
# stories to heights
heights = stories * c.height_per_story
# cost index for this height
costs = np.searchsorted(c.heights_for_costs, heights)
# this will get set to nan later
costs[np.isnan(heights)] = 0
# compute cost with matrix multiply
costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix)
# some heights aren't allowed - cost should be nan
costs[np.isnan(stories).flatten()] = np.nan
return costs.flatten()
def _generate_lookup(self):
"""
Run the developer model on all possible inputs specified in the
configuration object - not generally called by the user. This part
computes the final cost per sqft of the building to construct and
then turns it into the yearly rent necessary to make break even on
that cost.
"""
c = self.config
# get all the building forms we can use
keys = c.forms.keys()
keys.sort()
df_d = {}
for name in keys:
# get the use distribution for each
uses_distrib = c.forms[name]
for parking_config in c.parking_configs:
# going to make a dataframe to store values to make
# pro forma results transparent
df = pd.DataFrame(index=c.fars)
df['far'] = c.fars
df['pclsz'] = c.tiled_parcel_sizes
building_bulk = np.reshape(
c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1))
building_bulk = np.reshape(building_bulk, (-1, 1))
# need to converge in on exactly how much far is available for
# deck pkg
if parking_config == 'deck':
orig_bulk = building_bulk
while 1:
parkingstalls = building_bulk * \
np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate
if np.where(
np.absolute(
orig_bulk - building_bulk -
parkingstalls *
c.parking_sqft_d[parking_config])
> 10.0)[0].size == 0:
break
building_bulk = orig_bulk - parkingstalls * \
c.parking_sqft_d[parking_config]
df['building_sqft'] = building_bulk
parkingstalls = building_bulk * \
np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate
parking_cost = (c.parking_cost_d[parking_config] *
parkingstalls *
c.parking_sqft_d[parking_config])
df['spaces'] = parkingstalls
if parking_config == 'underground':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = building_bulk / c.tiled_parcel_sizes
if parking_config == 'deck':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = ((building_bulk + parkingstalls *
c.parking_sqft_d[parking_config]) /
c.tiled_parcel_sizes)
if parking_config == 'surface':
stories = building_bulk / \
(c.tiled_parcel_sizes - parkingstalls *
c.parking_sqft_d[parking_config])
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
# not all fars support surface parking
stories[np.where(stories < 0.0)] = np.nan
df['total_sqft'] = df.building_sqft + df.park_sqft
stories /= c.parcel_coverage
df['stories'] = np.ceil(stories)
df['build_cost_sqft'] = self._building_cost(uses_distrib, stories)
df['build_cost'] = df.build_cost_sqft * df.building_sqft
df['park_cost'] = parking_cost
df['cost'] = df.build_cost + df.park_cost
df['ave_cost_sqft'] = (df.cost / df.total_sqft) * c.profit_factor
if name == 'retail':
df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan
if name == 'industrial':
df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan
df_d[(name, parking_config)] = df
# from here on out we need the min rent for a form and a far
min_ave_cost_sqft_d = {}
bignum = 999999
for name in keys:
min_ave_cost_sqft = None
for parking_config in c.parking_configs:
ave_cost_sqft = df_d[(name, parking_config)][
'ave_cost_sqft'].fillna(bignum)
min_ave_cost_sqft = ave_cost_sqft if min_ave_cost_sqft is None \
else np.minimum(min_ave_cost_sqft, ave_cost_sqft)
min_ave_cost_sqft = min_ave_cost_sqft.replace(bignum, np.nan)
# this is the minimum cost per sqft for this form and far
min_ave_cost_sqft_d[name] = min_ave_cost_sqft
self.dev_d = df_d
self.min_ave_cost_d = min_ave_cost_sqft_d
def get_debug_info(self, form, parking_config):
"""
Get the debug info after running the pro forma for a given form and parking
configuration
Parameters
----------
form : string
The form to get debug info for
parking_config : string
The parking configuration to get debug info for
Returns
-------
debug_info : dataframe
A dataframe where the index is the far with many columns
representing intermediate steps in the pro forma computation.
Additional documentation will be added at a later date, although
many of the columns should be fairly self-explanatory.
"""
return self.dev_d[(form, parking_config)]
def get_ave_cost_sqft(self, form):
"""
Get the average cost per sqft for the pro forma for a given form
Parameters
----------
form : string
Get a series representing the average cost per sqft for each form in
the config
Returns
-------
cost : series
A series where the index is the far and the values are the average
cost per sqft at which the building is "break even" given the
configuration parameters that were passed at run time.
"""
return self.min_ave_cost_d[form]
def lookup(self, form, df, only_built=True, pass_through=None):
"""
This function does the developer model lookups for all the actual input data.
Parameters
----------
form : string
One of the forms specified in the configuration file
df: dataframe
Pass in a single data frame which is indexed by parcel_id and has the
following columns
only_built : bool
Whether to return only those buildings that are profitable and allowed
by zoning, or whether to return as much information as possible, even if
unlikely to be built (can be used when development might be subsidized
or when debugging)
pass_through : list of strings
List of field names to take from the input parcel frame and pass
to the output feasibility frame - is usually used for debugging
purposes - these fields will be passed all the way through
developer
Input Dataframe Columns
rent : dataframe
A set of columns, one for each of the uses passed in the configuration.
Values are yearly rents for that use. Typical column names would be
"residential", "retail", "industrial" and "office"
land_cost : series
A series representing the CURRENT yearly rent for each parcel. Used to
compute acquisition costs for the parcel.
parcel_size : series
A series representing the parcel size for each parcel.
max_far : series
A series representing the maximum far allowed by zoning. Buildings
will not be built above these fars.
max_height : series
A series representing the maximum height allowed by zoning. Buildings
will not be built above these heights. Will pick between the min of
the far and height, will ignore one of them if one is nan, but will not
build if both are nan.
max_dua : series, optional
A series representing the maximum dwelling units per acre allowed by
zoning. If max_dua is passed, the average unit size should be passed
below to translate from dua to floor space.
ave_unit_size : series, optional
This is required if max_dua is passed above, otherwise it is optional.
This is the same as the parameter to Developer.pick() (it should be the
same series).
Returns
-------
index : Series, int
parcel identifiers
building_sqft : Series, float
The number of square feet for the building to build. Keep in mind
this includes parking and common space. Will need a helpful function
to convert from gross square feet to actual usable square feet in
residential units.
building_cost : Series, float
The cost of constructing the building as given by the
ave_cost_per_sqft from the cost model (for this FAR) and the number
of square feet.
total_cost : Series, float
The cost of constructing the building plus the cost of acquisition of
the current parcel/building.
building_revenue : Series, float
The NPV of the revenue for the building to be built, which is the
number of square feet times the yearly rent divided by the cap
rate (with a few adjustment factors including building efficiency).
max_profit_far : Series, float
The FAR of the maximum profit building (constrained by the max_far and
max_height from the input dataframe).
max_profit :
The profit for the maximum profit building (constrained by the max_far
and max_height from the input dataframe).
"""
# don't really mean to edit the df that's passed in
df = df.copy()
c = self.config
cost_sqft = self.get_ave_cost_sqft(form)
cost_sqft_col = np.reshape(cost_sqft.values, (-1, 1))
cost_sqft_index_col = np.reshape(cost_sqft.index.values, (-1, 1))
# weighted rent for this form
df['weighted_rent'] = np.dot(df[c.uses], c.forms[form])
# min between max_fars and max_heights
df['max_far_from_heights'] = df.max_height / c.height_per_story * \
c.parcel_coverage
# now also minimize with max_dua from zoning - since this pro forma is
# really geared toward per sqft metrics, this is a bit tricky. dua
# is converted to floorspace and everything just works (floor space
will get converted back to units in developer.pick() but we need to
# test the profitability of the floorspace allowed by max_dua here.
if 'max_dua' in df.columns:
# if max_dua is in the data frame, ave_unit_size must also be there
assert 'ave_unit_size' in df.columns
# so this is the max_dua times the parcel size in acres, which gives
# the number of units that are allowable on the parcel, times
# by the average unit size which gives the square footage of
# those units, divided by the building efficiency which is a
# factor that indicates that the actual units are not the whole
# FAR of the building and then divided by the parcel size again
# in order to get FAR - I recognize that parcel_size actually
# cancels here as it should, but the calc was hard to get right
# and it's just so much more transparent to have it in there twice
df['max_far_from_dua'] = df.max_dua * \
(df.parcel_size / 43560) * \
df.ave_unit_size / self.config.building_efficiency / \
df.parcel_size
df['min_max_fars'] = df[['max_far_from_heights', 'max_far',
'max_far_from_dua']].min(axis=1)
else:
df['min_max_fars'] = df[['max_far_from_heights', 'max_far']].min(axis=1)
if only_built:
df = df.query('min_max_fars > 0 and parcel_size > 0')
# all possible fars on all parcels
fars = np.repeat(cost_sqft_index_col, len(df.index), axis=1)
# zero out fars not allowed by zoning
fars[fars > df.min_max_fars.values + .01] = np.nan
# parcel sizes * possible fars
building_bulks = fars * df.parcel_size.values
# cost to build the new building
building_costs = building_bulks * cost_sqft_col
# add cost to buy the current building
total_costs = building_costs + df.land_cost.values
# rent to make for the new building
building_revenue = building_bulks * c.building_efficiency *\
df.weighted_rent.values / c.cap_rate
# profit for each form
profit = building_revenue - total_costs
profit = profit.astype('float')
profit[np.isnan(profit)] = -np.inf
maxprofitind = np.argmax(profit, axis=0)
def twod_get(indexes, arr):
return arr[indexes, np.arange(indexes.size)].astype('float')
outdf = pd.DataFrame({
'building_sqft': twod_get(maxprofitind, building_bulks),
'building_cost': twod_get(maxprofitind, building_costs),
'total_cost': twod_get(maxprofitind, total_costs),
'building_revenue': twod_get(maxprofitind, building_revenue),
'max_profit_far': twod_get(maxprofitind, fars),
'max_profit': twod_get(maxprofitind, profit)
}, index=df.index)
if pass_through:
outdf[pass_through] = df[pass_through]
if only_built:
outdf = outdf.query('max_profit > 0').copy()
resratio = c.res_ratios[form]
nonresratio = 1.0 - resratio
outdf["residential_sqft"] = outdf.building_sqft * c.building_efficiency * resratio
outdf["non_residential_sqft"] = outdf.building_sqft * nonresratio
outdf["stories"] = outdf["max_profit_far"] / c.parcel_coverage
return outdf
def _debug_output(self):
"""
this code creates the debugging plots to understand
the behavior of the hypothetical building model
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
c = self.config
df_d = self.dev_d
keys = df_d.keys()
keys.sort()
for key in keys:
logger.debug("\n" + str(key) + "\n")
logger.debug(df_d[key])
for form in self.config.forms:
logger.debug("\n" + str(form) + "\n")
logger.debug(self.get_ave_cost_sqft(form))
keys = c.forms.keys()
keys.sort()
cnt = 1
share = None
fig = plt.figure(figsize=(12, 3 * len(keys)))
fig.suptitle('Profitable rents by use', fontsize=40)
for name in keys:
sumdf = None
for parking_config in c.parking_configs:
df = df_d[(name, parking_config)]
if sumdf is None:
sumdf =
|
pd.DataFrame(df['far'])
|
pandas.DataFrame
|
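The pro forma's _building_cost() picks a cost tier per building height with np.searchsorted and blends it across uses with a dot product. A toy rerun with illustrative numbers mirroring the defaults:
import numpy as np
import pandas as pd
heights_for_costs = [15, 55, 120, np.inf]
costs = np.transpose(np.array([
    [160.0, 175.0, 200.0, 230.0],   # retail
    [170.0, 190.0, 210.0, 240.0],   # residential
]))
use_mix = np.array([0.1, 0.9])      # the "mixedresidential" form
stories = pd.Series([1, 3, 8, 15], dtype=float)
heights = stories * 10.0                              # height_per_story
tier = np.searchsorted(heights_for_costs, heights)    # cost bracket per height
cost_per_sqft = np.dot(costs[tier.astype('int32')], use_mix)
print(pd.DataFrame({'stories': stories, 'height': heights, 'cost_sqft': cost_per_sqft}))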
"""
Created on Jan 09 2021
<NAME> and <NAME>
database analysis from
https://data.gov.il/dataset/covid-19
Israel cities coordinates data
https://data-israeldata.opendata.arcgis.com/
"""
import json
import requests
import sys
import extract_israel_data
from Utils import *
import time
import pandas as pd
import os
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import plotly  # needed for plotly.colors.qualitative below
from plotly.subplots import make_subplots
import datetime
import numpy as np
import warnings
plt.style.use('default')
warnings.filterwarnings("ignore")
line_statistic_plot_log=None
line_statistic_plot_fix_date=False
# data line plot
def line_statistic_plot(db, base, fields, title, ylabel, legend, text, save_name, log=None, fix_date=False):
f, ax = plt.subplots(figsize=(18, 6))
date = db[base]
date = pd.to_datetime(date)
len_data = len(date)
colors = plotly.colors.qualitative.Dark24 # ['blue', 'green', 'magenta', 'black', 'red', 'cyan', 'yellow']
sum_case = []
for cnt in range(len(fields)):
case = fields[cnt]
sum_case.append(db[case].max())
plt.plot(date, db[case], zorder=1, color=colors[cnt], linewidth=3)
plt.title(title, fontsize=20)
plt.ylabel(ylabel, fontsize=16)
plt.legend(legend, fontsize=14)
if fix_date:
datemin = pd.to_datetime('2020-03-01')
datemax = pd.to_datetime('2021-03-01')
else:
datemin = date.min()
datemax = date.max()
ax.set_xlim(datemin, datemax)
ax.grid(True)
# rotate and align the tick labels so they look better
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.fmt_xdata = formatter
f.autofmt_xdate()
if log:
ax.set_yscale('log')
if text is not None:
tline = 0.25*max(sum_case)
for kk in range(len(text)):
plt.plot((text[kk], text[kk]), (0, tline), '-k', linewidth=3)
plt.text(text[kk], 1.1*tline, text[kk].strftime('%d/%m/%y'), horizontalalignment='center', fontweight='bold', fontsize=14)
save_string = save_name + datemax.strftime('%d%m%y') + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
# Begin
full_data_file = os.path.join(os.getcwd(), time.strftime("%d%m%Y"), time.strftime("%d%m%Y") + '_loaded_files.csv')
if os.path.exists(full_data_file):
files_db = pd.read_csv(full_data_file, encoding="ISO-8859-8")
first_plt = False
else:
os.makedirs(os.path.join(os.getcwd(), time.strftime("%d%m%Y")), exist_ok=True)
# Extract Data from Israel Dataset COVID-19
files_db = extract_israel_data.extract_israel_data()
first_plt = True
# Print LOG to file
stdoutOrigin = sys.stdout
fout = open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), 'israel_status_log.txt'), 'a')
sys.stdout = MyWriter(sys.stdout, fout)
text = None
# text = pd.date_range('2020-04-01', '2021-04-01', freq="MS")
# Isolation
isolated = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('isolation').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('isolation').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'date'
isolated[base] = pd.to_datetime(isolated[base])
isolated = isolated.sort_values([base])
for key in isolated.keys():
try:
isolated.loc[isolated[key].str.contains('15>') != False, key] = 15
isolated[key] = isolated[key].astype(int)
except:
pass
iso1 = isolated.new_contact_with_confirmed.astype(int).sum()
iso2 = isolated.new_from_abroad.astype(int).sum()
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - isolated persons, total ' + str(iso1+iso2) + ', now ' +\
str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
ylabel = 'Number of individuals'
legend = ('Isolated due to contact with confirmed, total ' + str(iso1), 'Isolated due to arrived from abroad, total ' + str(iso2))
save_name = 'israelIsolatedPersons_'
fields = ['isolated_today_contact_with_confirmed', 'isolated_today_abroad']
# plot Isolated Total
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot isolated daily
fields = ['new_contact_with_confirmed', 'new_from_abroad']
save_name = 'israelIsolatedPersons_Daily_'
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - Daily isolated persons, total ' + str(iso1+iso2) + ', now ' +\
str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del isolated
###################################################################################################################
# Medical Staff
coronaMediaclStaffD = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('medical_staff').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('medical_staff').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'Date'
coronaMediaclStaffD[base] = pd.to_datetime(coronaMediaclStaffD[base])
coronaMediaclStaffD = coronaMediaclStaffD.sort_values([base])
for key in coronaMediaclStaffD.keys():
try:
coronaMediaclStaffD.loc[coronaMediaclStaffD[key].str.contains('<15') != False, key] = 15
coronaMediaclStaffD[key] = coronaMediaclStaffD[key].astype(int)
except:
pass
ylabel = 'Number of individuals'
title = 'Israel - medical staff confirmed (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
save_name = 'coronaMediaclStaffConfirmed_'
fields = ['confirmed_cases_physicians', 'confirmed_cases_nurses', 'confirmed_cases_other_healthcare_workers']
legend = ['Confirmed physicians', 'Confirmed nurses', 'Confirmed other healthcare workers']
# plot coronaMediaclStaffConfirmed Total
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot coronaMediaclStaffIsolated daily
title = 'Israel - medical staff in isolation (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
fields = ['isolated_physicians', 'isolated_nurses', 'isolated_other_healthcare_workers']
legend = ['Isolated physicians', 'Isolated nurses', 'Isolated other healthcare workers']
save_name = 'coronaMediaclStaffIsolated_'
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del coronaMediaclStaffD
###################################################################################################################
# Hospitalization
hospitalization = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('hospitalization').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('hospitalization').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'תאריך'
hospitalization[base] = pd.to_datetime(hospitalization[base])
hospitalization = hospitalization.sort_values([base])
for key in hospitalization.keys():
try:
hospitalization.loc[hospitalization[key].str.contains('15>') != False, key] = 15
hospitalization.loc[hospitalization[key].str.contains('<15') != False, key] = 15
hospitalization[key] = hospitalization[key].astype(int)
except:
pass
ylabel = 'Number of individuals [persons]'
title = 'Israel - Critical conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalized_'
fields = ['מונשמים', 'חולים קשה', 'מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalized Total
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInAge_'
fields = ['גיל ממוצע מונשמים', 'גיל ממוצע חולים קשה', 'גיל ממוצע מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInWomens_'
fields = ['אחוז נשים מונשמות', 'אחוז נשים חולות קשה', 'אחוז נשים מאושפזות']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Ill
title = 'Israel - ill conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['חולים קל', 'חולים בינוני', 'חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditions_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel mean Age Ill
title = 'Israel - ill conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['גיל ממוצע חולים קל', 'גיל ממוצע חולים בינוני', 'גיל ממוצע חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditionsInAge_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Women Percentage Ill
title = 'Israel - ill conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['אחוז נשים חולות קל', 'אחוז נשים חולות בינוני', 'אחוז נשים חולות קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditionsInWomens_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del hospitalization
###################################################################################################################
# Recovered
recovered = pd.read_excel(files_db.current_file_path[files_db.current_file_name.str.find('recovered').values.argmax()], encoding="ISO-8859-8")
###################################################################################################################
id = files_db.current_file_name.str.find('recovered').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
recoveredMeanTime = recovered.days_between_pos_and_recovery.mean()
recoveredMedianTime = recovered.days_between_pos_and_recovery.median()
print('Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
print('Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
NN = int(recovered.days_between_pos_and_recovery.max())
hh = np.histogram(recovered.days_between_pos_and_recovery, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
# ax.set_yscale('log')
plt.plot([recoveredMedianTime, recoveredMedianTime], [0, hh[0].max()], 'k--')
plt.text(recoveredMedianTime, hh[0].max(), ' Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
plt.plot([recoveredMeanTime, recoveredMeanTime], [0, hh[0][int(recoveredMeanTime)]], 'k--')
plt.text(recoveredMeanTime, hh[0][int(recoveredMeanTime)], ' Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to recovered [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
try:
data_from = pd.to_datetime(str(files_db.last_update[id]))
plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelRecovered_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del recovered
###################################################################################################################
# Deceased
deceased = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('deceased').values.argmax()], encoding='latin-1')
###################################################################################################################
id = files_db.current_file_name.str.find('deceased').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
deceasedMeanTime = deceased.Time_between_positive_and_death.mean()
deceasedMedianTime = deceased.Time_between_positive_and_death.median()
print('Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
print('Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
NN = int(deceased.Time_between_positive_and_death.max())
hh = np.histogram(deceased.Time_between_positive_and_death, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
plt.plot([deceasedMedianTime, deceasedMedianTime], [0, hh[0].max()], 'k--')
plt.text(deceasedMedianTime, hh[0].max(), ' Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
plt.plot([deceasedMeanTime, deceasedMeanTime], [0, hh[0][int(deceasedMeanTime)]], 'k--')
plt.text(deceasedMeanTime, hh[0][int(deceasedMeanTime)], ' Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to deceased [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
try:
plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
str(int(deceased.Ventilated.sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
str(int(deceased.Ventilated.sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelDeceased_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del deceased
###################################################################################################################
plt.close('all')
# Lab Test
lab_tests = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('lab_tests').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('lab_tests').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'result_date'
# lab_tests.loc[lab_tests['result_date'].isna() != False, 'result_date'] = lab_tests.loc[lab_tests['result_date'].isna() != False, 'test_date']
lab_tests = lab_tests[lab_tests['result_date'].isna() != True]
N = len(lab_tests.corona_result)
lab_tests[base] = pd.to_datetime(lab_tests[base])
lab_tests = lab_tests.sort_values([base])
possible_results = lab_tests.corona_result.unique()
FirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != False, ['result_date', 'corona_result']].reset_index()
first_grouped = FirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
first = first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('ראשון ')
del FirstTest, first_grouped
first_positive = first.xs("ראשון חיובי", level="corona_result", axis=1).values.squeeze()
first_negative = first.xs("ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_first = first.sum(axis=1).values.squeeze()
other_first = all_first - first_negative - first_positive
NotFirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != True, ['result_date', 'corona_result']].reset_index()
not_first_grouped = NotFirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
not_first = not_first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('לא ראשון ')
del NotFirstTest, not_first_grouped
not_first_positive = not_first.xs("לא ראשון חיובי", level="corona_result", axis=1).values.squeeze()
not_first_negative = not_first.xs("לא ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_not_first = not_first.sum(axis=1).values.squeeze()
other_not_first = all_not_first - not_first_positive - not_first_negative
full_lab_data = pd.concat([first.squeeze(), not_first.squeeze()], axis=1, sort=False)
# Saving full data
full_lab_data.to_csv(os.path.join(os.getcwd(), time.strftime("%d%m%Y"),
time.strftime("%d%m%Y") + 'complete_laboratory_data.csv'), encoding="windows-1255")
dateList = pd.DataFrame(lab_tests[base].unique(), columns=['Date'])
fields = ['PositiveFirst', 'NegativeFirst', 'OtherFirst', 'PositiveNotFirst', 'NegativeNotFirst', 'OtherNotFirst']
lab_data = pd.concat([dateList, pd.DataFrame(first_positive, columns=[fields[0]]),
pd.DataFrame(first_negative, columns=[fields[1]]),
pd.DataFrame(other_first, columns=[fields[2]]),
pd.DataFrame(not_first_positive, columns=[fields[3]]),
pd.DataFrame(not_first_negative, columns=[fields[4]]),
pd.DataFrame(other_not_first, columns=[fields[5]])],
axis=1, sort=False)
title = 'Israel ' + dateList.Date.max().strftime('%d/%m/%y') + ' - count of first test per person. Total tests performed ' + str(int(N))
ylabel = 'Number of individuals'
save_name = 'israelTestPerformed_'
base = 'Date'
legend = ['Positive First test, total ' + str(int(lab_data.PositiveFirst.sum())),
'Negative First test, total ' + str(int(lab_data.NegativeFirst.sum())),
'Other First test, total ' + str(int(lab_data.OtherFirst.sum())),
'Positive not a First test, total ' + str(int(lab_data.PositiveNotFirst.sum())),
'Negative not a First test, total ' + str(int(lab_data.NegativeNotFirst.sum())),
'Other not a First test, total ' + str(int(lab_data.OtherNotFirst.sum())), ]
# plot Test Performed Total
line_statistic_plot(lab_data, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot Test Performed Total Log
save_name = 'israelTestPerformed_Logy_'
line_statistic_plot(lab_data, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del lab_tests
###################################################################################################################
# Individuals
individuals = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('tested_individuals_ver').values.argmax()])
individuals_last = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('tested_individuals_subset').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('tested_individual').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'test_date'
individuals = individuals[individuals['test_date'].isna() != True]
N = len(individuals.corona_result)
individuals[base] = pd.to_datetime(individuals[base])
individuals = individuals.sort_values([base])
individuals_last[base] = pd.to_datetime(individuals_last[base])
individuals_last = individuals_last.sort_values([base])
individuals = individuals[(individuals['test_date'] >= individuals_last['test_date'].unique().min()).values != True]
individuals = pd.concat([individuals, individuals_last])
individuals['symptoms'] = individuals.loc[:, ['cough', 'fever', 'sore_throat', 'shortness_of_breath', 'head_ache']].sum(axis=1)
possible_results = individuals.corona_result.unique()
dateList = pd.DataFrame(individuals[base].unique(), columns=['Date'])
# TestIndication
PosTest = individuals.loc[individuals['corona_result'].str.contains('חיובי') != False, ['test_date', 'test_indication']].reset_index()
posindicate = PosTest.groupby(['test_date', 'test_indication'], as_index=False).count()
posindicate = posindicate.set_index(['test_date', 'test_indication']).unstack().fillna(0).astype(int)
# plot israelPositiveTestIndication
fields = ['Abroad', 'Contact with confirmed', 'Other']
title = 'Israel (data from ' + dateList.Date.max().strftime('%d/%m/%y') + ')- Positive test indication (Total tests performed ' + str(int(N)) + ')'
ylabel = 'Number of positive tests'
save_name = 'israelPositiveTestIndication_'
Abroad = posindicate.xs('Abroad', level="test_indication", axis=1).values.squeeze()
Contact = posindicate.xs('Contact with confirmed', level="test_indication", axis=1).values.squeeze()
Other = posindicate.xs('Other', level="test_indication", axis=1).values.squeeze()
legend = ['Abroad, total ' + str(int(Abroad.sum())),
'Contact with confirmed, total ' + str(int(Contact.sum())),
'Other, total ' + str(int(Other.sum()))]
pos_indicate = pd.concat([dateList, pd.DataFrame(Abroad, columns=[fields[0]]),
pd.DataFrame(Contact, columns=[fields[1]]),
pd.DataFrame(Other, columns=[fields[2]])],
axis=1, sort=False)
line_statistic_plot(pos_indicate, 'Date', fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del posindicate
# Loop over the possible corona test results
name_possible_resuts = ['Other', 'Negative', 'Positive']
for ctest in range(len(possible_results)):
test = possible_results[ctest]
result = name_possible_resuts[ctest]
# Syndromes
syndromes = ['cough', 'fever', 'sore_throat', 'shortness_of_breath', 'head_ache']
# Every Syndrome statistic per day
PosSyindromes = individuals.loc[individuals['corona_result'].str.contains(test) != False, ['test_date', 'cough',
'fever', 'sore_throat', 'shortness_of_breath', 'head_ache']].reset_index()
pos_synd_every = PosSyindromes.groupby(['test_date'], as_index=False).sum()
# plot Positive Syndrome
legend = syndromes
fields = syndromes
title = 'Israel Symptoms for ' + result + ' Result (data from ' + dateList.Date.max().strftime('%d/%m/%y') + ')'
save_name = 'israel' + result + 'TestSymptoms_'
ylabel = 'Symptoms'
# usual plot
line_statistic_plot(pos_synd_every, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# log plot
save_name = 'israel' + result + 'TestSymptoms_Logy_'
line_statistic_plot(pos_synd_every, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# Number of positive syndrome statistic per day
PosSyindrome = individuals.loc[individuals['corona_result'].str.contains(test) != False, ['test_date', 'symptoms']].reset_index()
pos_synd = PosSyindrome.groupby(['test_date', 'symptoms'], as_index=False).count()
pos_synd = pos_synd.set_index(['test_date', 'symptoms']).unstack().fillna(0).astype(int)
Noone = pos_synd.xs(0, level="symptoms", axis=1).values.squeeze()
One = pos_synd.xs(1, level="symptoms", axis=1).values.squeeze()
Two = pos_synd.xs(2, level="symptoms", axis=1).values.squeeze()
Three = pos_synd.xs(3, level="symptoms", axis=1).values.squeeze()
Four = pos_synd.xs(4, level="symptoms", axis=1).values.squeeze()
Five = pos_synd.xs(5, level="symptoms", axis=1).values.squeeze()
legend = ['No symptoms (asymptomatic), total ' + str(int(Noone.sum())),
'One symptom, total ' + str(int(One.sum())),
'Two symptoms, total ' + str(int(Two.sum())),
'Three symptoms, total ' + str(int(Three.sum())),
'Four symptoms, total ' + str(int(Four.sum())),
'Five symptoms, total ' + str(int(Five.sum()))]
fields = ['No', 'One', 'Two', 'Three', 'Four', 'Five']
pos_syndrome = pd.concat([dateList, pd.DataFrame(Noone, columns=[fields[0]]),
pd.DataFrame(One, columns=[fields[1]]),
pd.DataFrame(Two, columns=[fields[2]]),
pd.DataFrame(Three, columns=[fields[3]]),
pd.DataFrame(Four, columns=[fields[4]]),
pd.DataFrame(Five, columns=[fields[5]])],
axis=1, sort=False)
# plot Quantitative Symptoms
title = 'Israel Quantitative Symptoms for ' + result + ' Result (data from ' + dateList.Date.max().strftime('%d/%m/%y') + ')'
save_name = 'israelQuantitative' + result + 'TestSymptoms_'
ylabel = 'Number of Symptoms'
# usual plot
line_statistic_plot(pos_syndrome, 'Date', fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# log plot
save_name = 'israelQuantitative' + result + 'TestSymptoms_Logy_'
line_statistic_plot(pos_syndrome, 'Date', fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
###################################################################################################################
# Comparison between WHO and Israel Data
###################################################################################################################
try:
db = pd.read_csv(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), 'israel_db.csv'))
db['Date'] = pd.to_datetime(db['Date'])
if db.Date.max() <= lab_data.Date.max():
lab_data = lab_data[(lab_data['Date'] >= db.Date.max()).values != True]
pos_indicate = pos_indicate[(pos_indicate['Date'] >= db.Date.max()).values != True]
else:
db = db[(db['Date'] >= lab_data.Date.max()).values != True]
individ = pd.DataFrame(pos_indicate.iloc[:, 1:].sum(axis=1).values, columns=['Individ'])
compare_db = pd.concat([db[['Date', 'NewConfirmed']], lab_data['PositiveFirst'], individ['Individ']], axis=1, sort=False)
save_name = 'newCasesWHODataVsIsraelData'
title = 'Israel New Confirmed WHO data vs. Israel Ministry of Health data'
ylabel = 'Number of individuals'
legend = ['New cases WHO data, total ' + str(int(db.NewConfirmed.sum())) +
' at ' + db.Date.max().strftime('%d/%m/%y'),
'Positive first test from lab_tests.csv data, total ' + str(int(lab_data.PositiveFirst.sum())) +
' at ' + dateList.Date.max().strftime('%d/%m/%y'),
'Positive test from tested_individuals.csv data, total '+str(int(individ['Individ'].sum())) +
' at ' + dateList.Date.max().strftime('%d/%m/%y')
]
fields = ['NewConfirmed', 'PositiveFirst', 'Individ']
base = 'Date'
line_statistic_plot(compare_db, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
except:
print('The file israel_db.csv is not loaded')
###################################################################################################################
# Load Geographical data of Israel Cities
###################################################################################################################
url = 'https://opendata.arcgis.com/datasets/a589d87604c6477ca4afb78f205b98fb_0.geojson'
r = requests.get(url)
data = json.loads(r.content)
df =
|
pd.json_normalize(data, ['features'])
|
pandas.json_normalize
|
from __future__ import print_function, division
import numpy as np
import pandas as pd
import os, os.path, shutil
import re
import logging
try:
import cPickle as pickle
except ImportError:
import pickle
from pkg_resources import resource_filename
from configobj import ConfigObj
from scipy.integrate import quad
from tqdm import tqdm
from schwimmbad import choose_pool
from astropy.coordinates import SkyCoord
from isochrones.starmodel import StarModel
from isochrones.dartmouth import Dartmouth_Isochrone
from .transitsignal import TransitSignal
from .populations import PopulationSet
from .populations import fp_fressin
from .fpp import FPPCalculation
from .stars import get_AV_infinity
try:
from keputils.koiutils import koiname
from keputils import koiutils as ku
from keputils import kicutils as kicu
except ImportError:
logging.warning('keputils not available')
#from simpledist import distributions as dists
import kplr
KPLR_ROOT = os.getenv('KPLR_ROOT', os.path.expanduser('~/.kplr'))
JROWE_DIR = os.getenv('JROWE_DIR', os.path.expanduser('~/.jrowe'))
JROWE_FILE = resource_filename('vespa', 'data/jrowe_mcmc_fits.csv')
JROWE_DATA =
|
pd.read_csv(JROWE_FILE, index_col=0)
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from sklearn.ensemble import RandomForestClassifier
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import os
from tqdm import tqdm
import joblib
# In[3]:
def strip(data):
columns = data.columns
new_columns = []
for i in range(len(columns)):
new_columns.append(columns[i].strip())
return new_columns
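# Hedged usage sketch for strip(): shows how whitespace-padded CSV headers are
# normalised before use; the demo column names below are assumptions, not taken
# from the real data files.
def _strip_demo():
    demo = pd.DataFrame(columns=[' Flow Duration', ' Label '])
    demo.columns = strip(demo)
    return list(demo.columns)  # ['Flow Duration', 'Label']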
# In[4]:
Agreement_csv = ['bittorrent.csv',
'dns.csv',
'ftp.csv',
'httphttps.csv',
'pop3.csv',
'smtp.csv',
'ssh.csv',
'telnet.csv']
# In[5]:
path = '../Data'
for root, dirs, files in os.walk(path):
print(files)
if files==[]:
break
csv = files
# In[6]:
csv = [i for i in csv if i not in Agreement_csv]
# In[7]:
data = pd.DataFrame()
for single_csv in csv:
if 'csv' in single_csv:
temp = pd.read_csv(os.path.join(path,single_csv))
else:
temp = pd.read_excel(os.path.join(path,single_csv))
data = pd.concat([temp,data],axis=0)
data.columns = strip(data)
# In[8]:
Label_encoder = {data['Label'].unique()[i]:i for i in range(len(data['Label'].unique()))}
# In[9]:
data['Label']=data['Label'].apply(lambda x:Label_encoder[x])
# In[10]:
data
# In[11]:
positive_len = len(data[data.Label!=1])
negative = data[data['Label'] == 1].sample(n = positive_len)
data_balance =
|
pd.concat([negative,data[data.Label!=1]])
|
pandas.concat
|
# encoding: UTF-8
'''
This file contains the portfolio backtesting engine of the CTA module. The backtesting
engine exposes the same API as the CTA engine, so the exact code used in live trading
can be reused for backtesting.
华富资产 李来佳 (Huafu Asset, Li Laijia)
'''
from __future__ import division
import sys
import os
import gc
import pandas as pd
import numpy as np
import traceback
import random
import bz2
import pickle
from datetime import datetime, timedelta
from time import sleep
from vnpy.trader.object import (
TickData,
BarData,
RenkoBarData,
)
from vnpy.trader.constant import (
Exchange,
)
from vnpy.trader.utility import (
extract_vt_symbol,
get_underlying_symbol,
get_trading_date,
import_module_by_str
)
from .back_testing import BackTestingEngine
# Mapping between vnpy exchanges and the tick-data folder names of the Taobao-sourced data
VN_EXCHANGE_TICKFOLDER_MAP = {
Exchange.SHFE.value: 'SQ',
Exchange.DCE.value: 'DL',
Exchange.CZCE.value: 'ZZ',
Exchange.CFFEX.value: 'ZJ',
Exchange.INE.value: 'SQ'
}
class PortfolioTestingEngine(BackTestingEngine):
"""
CTA portfolio backtesting engine, using BackTestingEngine as its parent class.
The function interface is kept identical to the strategy engine, so one and the
same code base runs both in backtesting and in live trading.
Supports backtesting on 1-minute bars or on ticks.
Imports CTA_Settings.
"""
def __init__(self, event_engine=None):
"""Constructor"""
super().__init__(event_engine)
self.bar_csv_file = {}
self.bar_df_dict = {}  # dict of per-symbol historical-data DataFrames, used for backtesting
self.bar_df = None  # combined historical-data DataFrame, indexed by (datetime, symbol)
self.bar_interval_seconds = 60  # K-line period of the bar csv files in seconds, default 1 minute
self.tick_path = None  # data path for tick-level backtesting
self.use_tq = False  # True: use TianQin (tq) csv data; False: use Taobao-purchased csv data (pre-2019)
self.use_pkb2 = True  # use tick-by-tick trade data downloaded from tdx (pkb2 compressed format) to simulate ticks
def load_bar_csv_to_df(self, vt_symbol, bar_file, data_start_date=None, data_end_date=None):
"""加载回测bar数据到DataFrame"""
self.output(u'loading {} from {}'.format(vt_symbol, bar_file))
if vt_symbol in self.bar_df_dict:
return True
if bar_file is None or not os.path.exists(bar_file):
self.write_error(u'回测时,{}对应的csv bar文件{}不存在'.format(vt_symbol, bar_file))
return False
try:
data_types = {
"datetime": str,
"open": float,
"high": float,
"low": float,
"close": float,
"open_interest": float,
"volume": float,
"instrument_id": str,
"symbol": str,
"total_turnover": float,
"limit_down": float,
"limit_up": float,
"trading_day": str,
"date": str,
"time": str
}
if vt_symbol.startswith('future_renko'):
data_types.update({
"color": str,
"seconds": int,
"high_seconds": int,
"low_seconds": int,
"height": float,
"up_band": float,
"down_band": float,
"low_time": str,
"high_time": str
})
# load the csv file into a DataFrame
symbol_df = pd.read_csv(bar_file, dtype=data_types)
if len(symbol_df)==0:
print(f'回测时加载{vt_symbol} csv文件{bar_file}失败。', file=sys.stderr)
self.write_error(f'回测时加载{vt_symbol} csv文件{bar_file}失败。')
return False
first_dt = symbol_df.iloc[0]['datetime']
if '.' in first_dt:
datetime_format = "%Y-%m-%d %H:%M:%S.%f"
else:
datetime_format = "%Y-%m-%d %H:%M:%S"
# convert the time column from str to datetime
symbol_df["datetime"] = pd.to_datetime(symbol_df["datetime"], format=datetime_format)
# set datetime as the index
symbol_df = symbol_df.set_index("datetime")
# slice the data to the test period
symbol_df = symbol_df.loc[self.test_start_date:self.test_end_date]
self.bar_df_dict.update({vt_symbol: symbol_df})
except Exception as ex:
self.write_error(u'回测时读取{} csv文件{}失败:{}'.format(vt_symbol, bar_file, ex))
self.output(u'回测时读取{} csv文件{}失败:{}'.format(vt_symbol, bar_file, ex))
return False
return True
def comine_bar_df(self):
"""
Merge the bar DataFrames of all backtested contracts into one combined DataFrame,
i.e. bar_df_dict => bar_df
:return:
"""
self.output('comine_df')
if len(self.bar_df_dict) == 0:
print(f'{self.test_name}:无加载任何数据,请检查bar文件路径配置',file=sys.stderr)
self.output(f'{self.test_name}:无加载任何数据,请检查bar文件路径配置')
self.bar_df = pd.concat(self.bar_df_dict, axis=0).swaplevel(0, 1).sort_index()
self.bar_df_dict.clear()
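# Illustrative sketch of the combination step above (a standalone, assumption-labelled
# demo, not engine code): concatenating a {vt_symbol: DataFrame} dict and swapping the
# index levels yields a frame indexed by (datetime, vt_symbol), so iterating it replays
# all symbols in strict time order. The symbol names 'rb2105.SHFE' / 'hc2105.SHFE' and
# the prices are invented for the demo.
def _combine_bar_df_sketch():
    idx = pd.to_datetime(['2021-01-04 09:31:00', '2021-01-04 09:32:00'])
    a = pd.DataFrame({'close': [10.0, 10.1]}, index=idx)
    b = pd.DataFrame({'close': [20.0, 20.2]}, index=idx)
    combined = pd.concat({'rb2105.SHFE': a, 'hc2105.SHFE': b}, axis=0)
    combined = combined.swaplevel(0, 1).sort_index()
    return combined  # MultiIndex rows: (datetime, vt_symbol)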
def prepare_env(self, test_setting):
self.output('portfolio prepare_env')
super().prepare_env(test_setting)
self.use_tq = test_setting.get('use_tq', False)
self.use_pkb2 = test_setting.get('use_pkb2', True)
if self.use_tq:
self.use_pkb2 = False
def prepare_data(self, data_dict):
"""
Prepare the portfolio data.
:param data_dict: configuration parameters of the contracts
:return:
"""
# call the parent backtesting engine to update the contract data
super().prepare_data(data_dict)
if len(data_dict) == 0:
self.write_log(u'请指定回测数据和文件')
return
if self.mode == 'tick':
return
# check / update the bar files
for symbol, symbol_data in data_dict.items():
self.write_log(u'配置{}数据:{}'.format(symbol, symbol_data))
bar_file = symbol_data.get('bar_file', None)
if bar_file is None:
self.write_error(u'{}没有配置数据文件')
continue
if not os.path.isfile(bar_file):
self.write_log(u'{0}文件不存在'.format(bar_file))
continue
self.bar_csv_file.update({symbol: bar_file})
def run_portfolio_test(self, strategy_setting: dict = {}):
"""
Run the portfolio backtest.
"""
if not self.strategy_start_date:
self.write_error(u'回测开始日期未设置。')
return
if len(strategy_setting) == 0:
self.write_error('未提供有效配置策略实例')
return
self.cur_capital = self.init_capital  # reset the starting capital for this run
if not self.data_end_date:
self.data_end_date = datetime.today()
self.test_end_date = datetime.now().strftime('%Y%m%d')
# save the backtest settings to the database
self.save_setting_to_mongo()
self.write_log(u'开始组合回测')
for strategy_name, strategy_setting in strategy_setting.items():
self.load_strategy(strategy_name, strategy_setting)
self.write_log(u'策略初始化完成')
self.write_log(u'开始回放数据')
self.write_log(u'开始回测:{} ~ {}'.format(self.data_start_date, self.data_end_date))
if self.mode == 'bar':
self.run_bar_test()
else:
self.run_tick_test()
def run_bar_test(self):
"""使用bar进行组合回测"""
testdays = (self.data_end_date - self.data_start_date).days
if testdays < 1:
self.write_log(u'回测时间不足')
return
# load the data
for vt_symbol in self.symbol_strategy_map.keys():
symbol, exchange = extract_vt_symbol(vt_symbol)
self.load_bar_csv_to_df(vt_symbol, self.bar_csv_file.get(symbol))
# for spread (arbitrage) contracts, extract the active / passive legs
if exchange == Exchange.SPD:
try:
active_symbol, active_rate, passive_symbol, passive_rate, spd_type = symbol.split('-')
active_vt_symbol = '.'.join([active_symbol, self.get_exchange(symbol=active_symbol).value])
passive_vt_symbol = '.'.join([passive_symbol, self.get_exchange(symbol=passive_symbol).value])
self.load_bar_csv_to_df(active_vt_symbol, self.bar_csv_file.get(active_symbol))
self.load_bar_csv_to_df(passive_vt_symbol, self.bar_csv_file.get(passive_symbol))
except Exception as ex:
self.write_error(u'为套利合约提取主动/被动合约出现异常:{}'.format(str(ex)))
# combine the data
self.comine_bar_df()
last_trading_day = None
bars_dt = None
bars_same_dt = []
gc_collect_days = 0
try:
for (dt, vt_symbol), bar_data in self.bar_df.iterrows():
symbol, exchange = extract_vt_symbol(vt_symbol)
if symbol.startswith('future_renko'):
bar_datetime = dt
bar = RenkoBarData(
gateway_name='backtesting',
symbol=symbol,
exchange=exchange,
datetime=bar_datetime
)
bar.seconds = float(bar_data.get('seconds', 0))
bar.high_seconds = float(bar_data.get('high_seconds', 0))  # upper time bound (seconds) of the current bar
bar.low_seconds = float(bar_data.get('low_seconds', 0))  # lower time bound (seconds) of the current bar
bar.height = float(bar_data.get('height', 0))  # height limit of the current bar
bar.up_band = float(bar_data.get('up_band', 0))  # baseline of the upper band
bar.down_band = float(bar_data.get('down_band', 0))  # baseline of the lower band
bar.low_time = bar_data.get('low_time', None)  # last time the price entered the lower band
bar.high_time = bar_data.get('high_time', None)  # last time the price entered the upper band
else:
bar_datetime = dt - timedelta(seconds=self.bar_interval_seconds)
bar = BarData(
gateway_name='backtesting',
symbol=symbol,
exchange=exchange,
datetime=bar_datetime
)
bar.open_price = float(bar_data['open'])
bar.close_price = float(bar_data['close'])
bar.high_price = float(bar_data['high'])
bar.low_price = float(bar_data['low'])
bar.volume = int(bar_data['volume'])
bar.open_interest = float(bar_data.get('open_interest', 0))
bar.date = bar_datetime.strftime('%Y-%m-%d')
bar.time = bar_datetime.strftime('%H:%M:%S')
str_td = str(bar_data.get('trading_day', ''))
if len(str_td) == 8:
bar.trading_day = str_td[0:4] + '-' + str_td[4:6] + '-' + str_td[6:8]
elif len(str_td) == 10:
bar.trading_day = str_td
else:
bar.trading_day = get_trading_date(bar_datetime)
if last_trading_day != bar.trading_day:
self.output(u'回测数据日期:{},资金:{}'.format(bar.trading_day, self.net_capital))
if self.strategy_start_date > bar.datetime:
last_trading_day = bar.trading_day
# bar time equals the queue time: append to the queue
if dt == bars_dt:
bars_same_dt.append(bar)
continue
else:
# bar time differs from the queue time: push the queued bars first
random.shuffle(bars_same_dt)
for _bar_ in bars_same_dt:
self.new_bar(_bar_)
# start a new queue
bars_same_dt = [bar]
bars_dt = dt
# update the daily net value
if self.strategy_start_date <= dt <= self.data_end_date:
if last_trading_day != bar.trading_day:
if last_trading_day is not None:
self.saving_daily_data(datetime.strptime(last_trading_day, '%Y-%m-%d'), self.cur_capital,
self.max_net_capital, self.total_commission)
last_trading_day = bar.trading_day
# new trading day: cancel outstanding orders
self.cancel_orders()
# update the position buffer
self.update_pos_buffer()
gc_collect_days += 1
if gc_collect_days >= 10:
# run garbage collection
gc.collect()
sleep(1)
gc_collect_days = 0
if self.net_capital < 0:
self.write_error(u'净值低于0,回测停止')
self.output(u'净值低于0,回测停止')
return
self.write_log(u'bar数据回放完成')
if last_trading_day is not None:
self.saving_daily_data(datetime.strptime(last_trading_day, '%Y-%m-%d'), self.cur_capital,
self.max_net_capital, self.total_commission)
except Exception as ex:
self.write_error(u'回测异常导致停止:{}'.format(str(ex)))
self.write_error(u'{},{}'.format(str(ex), traceback.format_exc()))
print(str(ex), file=sys.stderr)
traceback.print_exc()
return
def load_csv_file(self, tick_folder, vt_symbol, tick_date):
"""从文件中读取tick,返回list[{dict}]"""
# use TianQin (tq) tick data
if self.use_tq:
return self.load_tq_csv_file(tick_folder, vt_symbol, tick_date)
# use the Taobao-downloaded tick data (pre-2019)
symbol, exchange = extract_vt_symbol(vt_symbol)
underly_symbol = get_underlying_symbol(symbol)
exchange_folder = VN_EXCHANGE_TICKFOLDER_MAP.get(exchange.value)
if exchange == Exchange.INE:
file_path = os.path.abspath(
os.path.join(
tick_folder,
exchange_folder,
tick_date.strftime('%Y'),
tick_date.strftime('%Y%m'),
tick_date.strftime('%Y%m%d'),
'{}_{}.csv'.format(symbol.upper(), tick_date.strftime('%Y%m%d'))))
else:
file_path = os.path.abspath(
os.path.join(
tick_folder,
exchange_folder,
tick_date.strftime('%Y'),
tick_date.strftime('%Y%m'),
tick_date.strftime('%Y%m%d'),
'{}{}_{}.csv'.format(underly_symbol.upper(), symbol[-2:], tick_date.strftime('%Y%m%d'))))
ticks = []
if not os.path.isfile(file_path):
self.write_log(f'{file_path}文件不存在')
return None
df = pd.read_csv(file_path, encoding='gbk', parse_dates=False)
df.columns = ['date', 'time', 'last_price', 'volume', 'last_volume', 'open_interest',
'bid_price_1', 'bid_volume_1', 'bid_price_2', 'bid_volume_2', 'bid_price_3', 'bid_volume_3',
'ask_price_1', 'ask_volume_1', 'ask_price_2', 'ask_volume_2', 'ask_price_3', 'ask_volume_3', 'BS']
self.write_log(u'加载csv文件{}'.format(file_path))
last_time = None
for index, row in df.iterrows():
# date, time, last price, trade volume, total volume, attribute (open-interest change), B1 price, B1 vol, B2 price, B2 vol, B3 price, B3 vol, S1 price, S1 vol, S2 price, S2 vol, S3 price, S3 vol, BS
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
tick = row.to_dict()
tick.update({'symbol': symbol, 'exchange': exchange.value, 'trading_day': tick_date.strftime('%Y-%m-%d')})
tick_datetime = datetime.strptime(tick['date'] + ' ' + tick['time'], '%Y-%m-%d %H:%M:%S')
# fix milliseconds
if tick['time'] == last_time:
# same time as the previous tick once milliseconds are stripped: nudge the microsecond field (500) so timestamps stay distinct
tick_datetime = tick_datetime.replace(microsecond=500)
tick['time'] = tick_datetime.strftime('%H:%M:%S.%f')
else:
last_time = tick['time']
tick_datetime = tick_datetime.replace(microsecond=0)
tick['time'] = tick_datetime.strftime('%H:%M:%S.%f')
tick['datetime'] = tick_datetime
# exclude limit-up / limit-down ticks
if (float(tick['bid_price_1']) == float('1.79769E308') and int(tick['bid_volume_1']) == 0) \
or (float(tick['ask_price_1']) == float('1.79769E308') and int(tick['ask_volume_1']) == 0):
continue
ticks.append(tick)
del df
return ticks
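# Hedged sketch of the timestamp disambiguation used above (standalone demo only):
# when two consecutive ticks share the same second-resolution time string, the later
# one gets a non-zero microsecond value so downstream code sees distinct datetimes.
# It mirrors the original's replace(microsecond=500) and assumes the file's existing
# `datetime` import; the time strings and the 'day' default are demo assumptions.
def _dedup_tick_times_sketch(time_strings, day='2021-01-04'):
    out, last = [], None
    for ts in time_strings:
        dt_obj = datetime.strptime(day + ' ' + ts, '%Y-%m-%d %H:%M:%S')
        if ts == last:
            dt_obj = dt_obj.replace(microsecond=500)  # offset duplicated second-level times
        else:
            last = ts
        out.append(dt_obj)
    return out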
def load_tq_csv_file(self, tick_folder, vt_symbol, tick_date):
"""从天勤下载的csv文件中读取tick,返回list[{dict}]"""
symbol, exchange = extract_vt_symbol(vt_symbol)
underly_symbol = get_underlying_symbol(symbol)
exchange_folder = VN_EXCHANGE_TICKFOLDER_MAP.get(exchange.value)
file_path = os.path.abspath(
os.path.join(
tick_folder,
tick_date.strftime('%Y%m'),
'{}_{}.csv'.format(symbol, tick_date.strftime('%Y%m%d'))))
ticks = []
if not os.path.isfile(file_path):
self.write_log(u'{}文件不存在'.format(file_path))
return None
try:
df = pd.read_csv(file_path, parse_dates=False)
# datetime,symbol,exchange,last_price,highest,lowest,volume,amount,open_interest,upper_limit,lower_limit,
# bid_price_1,bid_volume_1,ask_price_1,ask_volume_1,
# bid_price_2,bid_volume_2,ask_price_2,ask_volume_2,
# bid_price_3,bid_volume_3,ask_price_3,ask_volume_3,
# bid_price_4,bid_volume_4,ask_price_4,ask_volume_4,
# bid_price_5,bid_volume_5,ask_price_5,ask_volume_5
self.write_log(u'加载csv文件{}'.format(file_path))
last_time = None
for index, row in df.iterrows():
tick = row.to_dict()
tick['date'], tick['time'] = tick['datetime'].split(' ')
tick.update({'trading_day': tick_date.strftime('%Y-%m-%d')})
tick_datetime = datetime.strptime(tick['datetime'], '%Y-%m-%d %H:%M:%S.%f')
# fix milliseconds
if tick['time'] == last_time:
# same time as the previous tick once milliseconds are stripped: nudge the microsecond field (500) so timestamps stay distinct
tick_datetime = tick_datetime.replace(microsecond=500)
tick['time'] = tick_datetime.strftime('%H:%M:%S.%f')
else:
last_time = tick['time']
tick_datetime = tick_datetime.replace(microsecond=0)
tick['time'] = tick_datetime.strftime('%H:%M:%S.%f')
tick['datetime'] = tick_datetime
# exclude limit-up / limit-down ticks
if (float(tick['bid_price_1']) == float('1.79769E308') and int(tick['bid_volume_1']) == 0) \
or (float(tick['ask_price_1']) == float('1.79769E308') and int(tick['ask_volume_1']) == 0):
continue
ticks.append(tick)
del df
except Exception as ex:
self.write_log(f'{file_path}文件读取不成功: {str(ex)}')
return None
return ticks
def load_bz2_cache(self, cache_folder, cache_symbol, cache_date):
"""加载缓存数据"""
if not os.path.exists(cache_folder):
self.write_error('缓存目录:{}不存在,不能读取'.format(cache_folder))
return None
cache_folder_year_month = os.path.join(cache_folder, cache_date[:6])
if not os.path.exists(cache_folder_year_month):
self.write_error('缓存目录:{}不存在,不能读取'.format(cache_folder_year_month))
return None
cache_file = os.path.join(cache_folder_year_month, '{}_{}.pkb2'.format(cache_symbol, cache_date))
if not os.path.isfile(cache_file):
cache_file = os.path.join(cache_folder_year_month, '{}_{}.pkz2'.format(cache_symbol, cache_date))
if not os.path.isfile(cache_file):
self.write_error('缓存文件:{}不存在,不能读取'.format(cache_file))
return None
with bz2.BZ2File(cache_file, 'rb') as f:
data = pickle.load(f)
return data
return None
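# Hedged sketch (assumption: the .pkb2 cache files are plain bz2-compressed pickles,
# which matches the read path above). Shows the corresponding write/read round trip
# for a toy list of tick dicts; the file name and the tick fields are illustrative only.
def _pkb2_roundtrip_sketch(tmp_path='demo_ticks.pkb2'):
    ticks = [{'symbol': 'rb2105', 'last_price': 4321.0}]
    with bz2.BZ2File(tmp_path, 'wb') as f:
        pickle.dump(ticks, f)
    with bz2.BZ2File(tmp_path, 'rb') as f:
        loaded = pickle.load(f)
    return loaded == ticks  # True if the round trip preserved the data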
def get_day_tick_df(self, test_day):
"""获取某一天得所有合约tick"""
tick_data_dict = {}
for vt_symbol in list(self.symbol_strategy_map.keys()):
symbol, exchange = extract_vt_symbol(vt_symbol)
if self.use_pkb2:
tick_list = self.load_bz2_cache(cache_folder=self.tick_path,
cache_symbol=symbol,
cache_date=test_day.strftime('%Y%m%d'))
else:
tick_list = self.load_csv_file(tick_folder=self.tick_path,
vt_symbol=vt_symbol,
tick_date=test_day)
if not tick_list or len(tick_list) == 0:
continue
symbol_tick_df = pd.DataFrame(tick_list)
# in the cache files the datetime field is already a datetime object
# de-duplicate by timestamp only for now; volume is not aggregated
symbol_tick_df.drop_duplicates(subset=['datetime'], keep='first', inplace=True)
symbol_tick_df.set_index('datetime', inplace=True)
tick_data_dict.update({vt_symbol: symbol_tick_df})
if len(tick_data_dict) == 0:
return None
tick_df =
|
pd.concat(tick_data_dict, axis=0)
|
pandas.concat
|
# -*- coding:utf-8 -*-
"""
Fundamental data interface
Created on 2015/01/18
@author: <NAME>
@group : waditu
@contact: <EMAIL>
"""
import pandas as pd
from hsstock.tushare.stock import cons as ct
import lxml.html
from lxml import etree
import re
import time
from pandas.compat import StringIO
from hsstock.tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_stock_basics(date=None):
"""
Fetch basic information of companies listed in Shanghai and Shenzhen.
Parameters
date: date in YYYY-MM-DD format, defaults to the previous trading day; historical
data is only available from 2016-08-09 onwards
Return
--------
DataFrame
code, stock code
name, company name
industry, sub-industry
area, region
pe, price/earnings ratio
outstanding, tradable shares
totals, total shares (10k)
totalAssets, total assets (10k)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
eps, earnings per share
bvps, book value per share
pb, price/book ratio
timeToMarket, listing date
"""
wdate = du.last_tddate() if date is None else date
wdate = wdate.replace('-', '')
if wdate < '20160809':
return None
datepre = '' if date is None else wdate[0:4] + wdate[4:6] + '/'
request = Request(ct.ALL_STOCK_BASICS_FILE%(datepre, '' if date is None else wdate))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
df = pd.read_csv(
|
StringIO(text)
|
pandas.compat.StringIO
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
import pandas as pd
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
def filter_cols(_df, n):
if any(["startswith" in i for i in n]):
_lst = [i for i in _df.columns if i.startswith(n[-1])]
else:
_lst = [i for i in _df.columns if n[-1] in i]
return _lst
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
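# Hedged usage sketch for multiIndex_pivot(): a toy frame with a two-column index
# pivoted on one column. The sample IDs, sweep types and values below are invented
# for the demo and are not taken from the real EC data.
def _multiIndex_pivot_demo():
    demo = pd.DataFrame({
        'SampleID': ['JOS1', 'JOS1', 'JOS2', 'JOS2'],
        'Sweep_Type': ['anodic', 'cathodic', 'anodic', 'cathodic'],
        'Gas': ['O2', 'O2', 'O2', 'O2'],
        'value': [1.0, 2.0, 3.0, 4.0],
    })
    wide = multiIndex_pivot(demo, index=['SampleID', 'Gas'],
                            columns='Sweep_Type', values='value')
    return wide  # rows: (SampleID, Gas) tuples, columns: anodic / cathodic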
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
def cm2inch(value):
return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def read_load_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
DF_diff.columns
return DF_diff
except Exception as e:
print("reading error", e)
return pd.DataFrame()
else:
print("read error not existing", _pklpath)
return pd.DataFrame()
def save_DF_pkl(_pklstem, _DF):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
_DF.to_pickle(_pklpath)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def load_dict_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
with open(_pklpath, "rb") as file:
_dict = pickle.load(file)
return _dict
except Exception as e:
print("reading error", e)
return {}
else:
print("read error not existing", _pklpath)
return {}
def save_dict_pkl(_pklstem, _dict):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
with open(_pklpath, "wb") as file:
pickle.dump(_dict, file)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def PorphSiO2_template():
# 'SerieIDs' : ('Porph_SiO2')*5,
Series_Porph_SiO2 = {
"SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
"Metal": ("Fe", "Co", "MnTPP", "FeTPP", "H2"),
"color": (2, 4, 6, 15, 3),
}
Porphyrins = {
"TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
"TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
"TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
"TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
"TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
"TPP": {"Formula": "C44H30N4", "MW": 614.7346},
}
Porph_template = pd.DataFrame(Series_Porph_SiO2)
return Porph_template
def EC_types_grp():
# KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
_basic_EC_cond = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
_extra_EC_cond = {
"N2CV": [],
"N2": [],
"ORR": ["RPM_DAC_uni"],
"KL": ["Electrode", "ORR_E_AppV_RHE"],
"EIS": ["E_RHE"],
"HER": ["HER_RPM_post"],
"OER": [],
}
_out = {key: _basic_EC_cond + val for key, val in _extra_EC_cond.items()}
return _out
def save_EC_index_PorphSiO2(EC_index, EC_folder):
_porph_index = EC_index.loc[EC_index.SampleID.isin(PorphSiO2_template().SampleID)]
_porph_index.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
folder = FindExpFolder("PorphSiO2").compare
Porph_template = PorphSiO2_template()
# globals EC_index
# ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
# 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
# model_select = EC_PorphSiO2.EIS_models[1]
# self = EC_PorphSiO2()
def __init__(self):
# self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
self.select_EC_ASTexps_from_ECindex()
# self.pars = EC_PorphSiO2.mergedEC()
# self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
def select_EC_ASTexps_from_ECindex(self):
EC_idx_PorphSiO2_samples = EC_index.loc[
EC_index.SampleID.isin(self.Porph_template.SampleID.unique())
]
# pd.read_excel(list(EC_folder.rglob('*EC_index*'))[0])
EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
for i in EC_idx_PorphSiO2_samples.PAR_date.to_numpy()
]
}
)
self.EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples
self.get_AST_days()
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
EC_idx_PorphSiO2_AST = EC_idx_PorphSiO2_samples.loc[
EC_idx_PorphSiO2_samples.PAR_date_day_dt.isin(
[i for a in self.AST_days.to_numpy() for i in a]
)
]
# AST_days = EC_PorphSiO2.get_AST_days()
# EC_idx_PorphSiO2_AST.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
self.EC_idx_PorphSiO2 = EC_idx_PorphSiO2_AST
# if LC_idx_fp.exists():
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
def get_AST_days(self):
gr_idx = self.EC_idx_PorphSiO2_samples.groupby("PAR_date_day_dt")
AST_days = []
for n, gr in gr_idx:
# n,gr
exps = gr.PAR_exp.unique()
# gr.PAR_date_day.unique()[0]
if any(["AST" in i for i in exps]):
# print(n,exps)
# AST_days.append(n)
if n + dt.timedelta(1) in gr_idx.groups.keys():
_post = gr_idx.get_group(n + dt.timedelta(1))
# print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
AST_days.append((n, n + dt.timedelta(1)))
else:
AST_days.append((n, n))
print(n + dt.timedelta(1), "grp missing")
# (AST_days[-1][0], AST_days[0][1])
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,25)))
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,26)))
_extra_AST_days = [
(dt.date(2019, 5, 6), dt.date(2019, 1, 25)),
(dt.date(2019, 5, 6), dt.date(2019, 1, 26)),
]
AST_days += _extra_AST_days
AST_days = pd.DataFrame(
AST_days, columns=["PAR_date_day_dt_pre", "PAR_date_day_dt_post"]
)
AST_days = AST_days.assign(
**{
"PAR_date_day_dt_diff": AST_days.PAR_date_day_dt_pre
- AST_days.PAR_date_day_dt_post
}
)
self.AST_days = AST_days
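# Hedged sketch of the day-pairing logic above (standalone demo, not class code): for
# every experiment day whose PAR_exp values contain 'AST', the following calendar day
# is taken as the post-AST day when it exists in the index, otherwise the day is paired
# with itself. The dates and experiment labels below are demo assumptions.
def _pair_ast_days_sketch():
    demo = pd.DataFrame({
        'PAR_date_day_dt': [dt.date(2019, 1, 24), dt.date(2019, 1, 24),
                            dt.date(2019, 1, 25), dt.date(2019, 5, 6)],
        'PAR_exp': ['ORR', 'AST-LC', 'ORR', 'AST-sHA'],
    })
    pairs = []
    grouped = demo.groupby('PAR_date_day_dt')
    for day, grp in grouped:
        if any('AST' in e for e in grp.PAR_exp.unique()):
            nxt = day + dt.timedelta(1)
            pairs.append((day, nxt if nxt in grouped.groups else day))
    return pd.DataFrame(pairs, columns=['pre', 'post'])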
# def select_ECexps(EC_folder):
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
# AST_days = EC_PorphSiO2.get_AST_days()
# if LC_idx_fp.exists():
# LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin([i for a in AST_days.to_numpy() for i in a])]
# LC_fls.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
# def repr_index(self):
# PAR_exp_uniq = {grn : len(grp) for grn,grp in self.index.groupby("PAR_exp")}
# print(f'Len({len(self.index)},\n{PAR_exp_uniq}')
def _testing_():
tt = EC_prepare_EC_merged(reload_AST=True, reload_merged=True, reload_pars=True)
self = tt
N2CV = self.N2cv(reload=False, use_daily=True)
#%% == EC_prepare_EC_merged == testing
class EC_prepare_EC_merged:
EIS_models = EIS_export.EIS_selection.mod_select
# ['Model(EEC_Randles_RWpCPE)', 'Model(EEC_2CPE)', 'Model(EEC_2CPEpW)',
# 'Model(EEC_RQ_RQ_RW)', 'Model(EEC_RQ_RQ_RQ)', 'Model(Randles_RQRQ)']
ORR_reload = dict(reload=True, use_daily=False)
ORR_no_reload = dict(reload=False, use_daily=True)
use_daily = True
# global ParsColl
# ParsColl = ParsColl
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
_pkl_EC_merged = "EC_merged_dict"
def __init__(self, reload_AST=False, reload_merged=False, reload_pars=True):
self.reload_AST = reload_AST
self.reload_merged = reload_merged
self.reload_pars = reload_pars
self.set_pars_collection()
self.reload_pars_kws = dict(reload=reload_pars, use_daily=self.use_daily)
self.EC_merged_dict = {}
self.load_EC_PorphSiO2()
self.load_merged_EC()
def set_pars_collection(self):
if "ParsColl" in globals().keys():
self.ParsColl = ParsColl
else:
Pars_Collection = CollectLoadPars(load_type="fast")
# globals()['Pars_Collection'] = Pars_Collection
ParsColl = Pars_Collection.pars_collection
self.ParsColl = ParsColl
def load_EC_PorphSiO2(self):
self.EC_PorphSiO2 = EC_PorphSiO2()
self.AST_days = self.EC_PorphSiO2.AST_days
self.EC_idx_PorphSiO2 = self.EC_PorphSiO2.EC_idx_PorphSiO2
def load_merged_EC(self):
if self.reload_merged:
self.reload_merged_EC()
if not self.EC_merged_dict:
_load_EC_merge = load_dict_pkl(self._pkl_EC_merged)
if _load_EC_merge:
self.EC_merged_dict = _load_EC_merge
def reload_merged_EC(self):
try:
self.load_N2CV()
self.load_ORR()
self.load_KL()
self.load_EIS()
self.load_HER()
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
except Exception as e:
_logger.warning(f"EC_prepare_EC_merged, reload_merged_EC failure: {e}")
def get_AST_matches(self, DF, _verbose=False):
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# DF = ORR.drop_duplicates()
# DF = N2CV.drop_duplicates()
# DF = EIS.drop_duplicates()
# DF = HER.drop_duplicates()
# DF = ttpars
if "PAR_date_day_dt" not in DF.columns:
DF = DF.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(
np.datetime_as_string(np.datetime64(i, "D"))
)
for i in DF.PAR_date.to_numpy()
]
}
)
DF.PAR_date_day_dt = pd.to_datetime(DF.PAR_date_day_dt, unit="D")
# list((set(DF.columns).intersection(set(LC_fls.columns))).intersection(set(mcols) ))
# DF = pd.merge(DF,LC_fls,on=)
_compare_cols = [
i for i in ["SampleID", "pH", "Gas", "Loading_cm2"] if i in DF.columns
]
_swp_rpm = [
"Sweep_Type",
"RPM_DAC_uni" if "RPM_DAC_uni" in DF.columns else "RPM_DAC",
]
_coll = []
# AST_days_run_lst = [i for i in AST_days if len(i) == 2][-1:]
for n, r in self.AST_days.iterrows():
# if len(_dates) == 2:
# _pre,_post = _dates
# elif (len_dates) == 1:
_pre, _post = r.PAR_date_day_dt_pre, r.PAR_date_day_dt_post
_preslice = DF.loc[
(DF.PAR_date_day == _pre.strftime("%Y-%m-%d")) & (DF.postAST == "no")
]
pre = _preslice.groupby(_compare_cols)
_postslice = DF.loc[
(DF.PAR_date_day == _post.strftime("%Y-%m-%d")) & (DF.postAST != "no")
]
post = _postslice.groupby(_compare_cols)
_res = {}
_res = {
"pre_PAR_date_day_dt": _pre,
"post_PAR_date_day_dt": _post,
"AST_days_n": n,
}
# print(_res,[_preslice.postAST.unique()[0], _postslice.postAST.unique()[0]])
union = set(pre.groups.keys()).union(set(post.groups.keys()))
matches = set(pre.groups.keys()).intersection(set(post.groups.keys()))
_difference_pre = set(pre.groups.keys()).difference(set(post.groups.keys()))
_difference_post = set(post.groups.keys()).difference(
set(pre.groups.keys())
)
# _diffr.append((_pre,_post,_difference_pre, _difference_post))
if not _preslice.empty and not _postslice.empty:
for match in union:
_res.update(dict(zip(_compare_cols, match)))
_mgrpcols = ["PAR_file", "dupli_num", "postAST"]
if match in matches:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
elif match in _difference_pre:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = pre.get_group(match).groupby(_mgrpcols)
elif match in _difference_post:
_mpre = post.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
# print(_mpost.groups)
for (_prePF, npr, _preAST), prgrp in _mpre:
_res.update(
{
"pre_dupli_num": npr,
"pre_PAR_file": _prePF,
"pre_postAST": _preAST,
}
)
for (_poPF, npo, _postAST), pogrp in _mpost:
_res.update(
{
"post_dupli_num": npo,
"post_PAR_file": _poPF,
"post_postAST": _postAST,
"dupli_num_combo": f"{npr}, {npo}",
}
)
if _postAST in "postAST_sHA|postAST_LC" and _verbose:
print(_res)
_pr1 = prgrp.groupby(_swp_rpm)
_po1 = pogrp.groupby(_swp_rpm)
_rpmswp_matches = set(_pr1.groups.keys()).intersection(
set(_po1.groups.keys())
)
for _m in _rpmswp_matches:
_res.update(dict(zip(_swp_rpm, _m)))
# print(_res)
_coll.append(_res.copy())
AST_matches = pd.DataFrame(_coll)
return AST_matches
# prgrp.groupby(['Sweep_Type','RPM_DAC']).groups
# prgrp['ORR_Jkin_min_700']-pogrp['ORR_Jkin_min_700']
def load_N2CV(self):
N2CV = self.edit_pars_N2cv(**self.reload_pars_kws)
# N2_pltqry = EC_merged_dict.get('N2CV')
N2_AST = self.get_AST_matches(N2CV)
N2_AST_diff = self.compare_AST_pars(N2CV, N2_AST, reload=self.reload_AST)
# _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# EC_merged_dict.update({'N2CV' : N2_AST_diff})
self.EC_merged_dict.update(
{"N2CV": {"PARS": N2CV, "AST_matches": N2_AST, "AST_diff": N2_AST_diff}}
)
def load_ORR(self, _testing=False):
ORR = self.edit_pars_ORR()
ORR_AST = self.get_AST_matches(ORR)
ORR_AST_diff = self.compare_AST_pars(ORR, ORR_AST, reload=self.reload_AST)
if _testing:
ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_AST = self.get_AST_matches(ttpars)
tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_diff = self.compare_AST_pars(ORR, tt, reload=self.reload_AST, save_pkl=False)
# ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# EC_merged_dict.update({'ORR' : ORR_AST_diff})
self.EC_merged_dict.update(
{"ORR": {"PARS": ORR, "AST_matches": ORR_AST, "AST_diff": ORR_AST_diff}}
)
def load_KL(self):
KL = self.edit_pars_KL()
KL = KL.assign(**{"RPM_DAC": 1500})
KL_AST = self.get_AST_matches(KL)
KL_AST_diff = self.compare_AST_pars(KL, KL_AST, reload=self.reload_AST)
# EC_merged_dict.update({'KL' : KL_AST_diff})
self.EC_merged_dict.update(
{"KL": {"PARS": KL, "AST_matches": KL_AST, "AST_diff": KL_AST_diff}}
)
def load_EIS(self):
EIS = self.edit_pars_EIS()
EIS_AST = self.get_AST_matches(EIS)
EIS_AST_diff = self.compare_AST_pars(EIS, EIS_AST, reload=self.reload_AST)
# EC_merged_dict.update({'EIS' : EIS_AST_diff})
self.EC_merged_dict.update(
{"EIS": {"PARS": EIS, "AST_matches": EIS_AST, "AST_diff": EIS_AST_diff}}
)
def load_HER(self):
HER = self.edit_pars_HER()
HER_type_grp = HER.groupby("HER_type")
HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
HER_AST = self.get_AST_matches(HER)
for Htype, Hgrp in HER_type_grp:
# Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
HER_AST_diff = self.compare_AST_pars(
Hgrp, HER_AST, reload=self.reload_AST, extra=Htype
)
try:
if not HER_AST_diff.empty:
self.EC_merged_dict.update(
{
f"HER_{Htype}": {
"PARS": Hgrp,
"AST_matches": HER_AST,
"AST_diff": HER_AST_diff,
}
}
)
except Exception as e:
print(f"HER {Htype} fail, {e}")
# EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
def finish_EC_merged(self):
# filter the merged results and persist them, using the instance attributes
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
# EC_merged_dict = load_dict_pkl(_pkl_EC_merged)
def add_filter_selection_of_EC_merged(self):
_drop_AST_row_pre = [
"2019-01-25;N2_20cls_300_100_10_JOS5_256;no;0",
"2019-01-25;N2_20cls_300_100_10_JOS4_256;no;0",
]
_check_cols = [
"SampleID",
"AST_row",
"PAR_date_day_dt_pre",
"PAR_date_day_dt_post",
"postAST_post",
]
_srt2 = ["postAST_post", "SampleID"]
_ch_match = [
"SampleID",
"pre_PAR_date_day_dt",
"post_PAR_date_day_dt",
"post_postAST",
"pre_postAST",
]
_sortcols = ["SampleID", "post_postAST"][::-1]
pd.set_option("display.max_columns", 6)
pd.set_option("display.width", 100)
for _EC, _DF in self.EC_merged_dict.items():
# _EC, _DF = 'N2CV', EC_merged_dict['N2CV']
# _EC, _DF = 'ORR', EC_merged_dict['ORR']
# print(_EC)
if "AST_row_n" not in _DF["AST_diff"].columns:
_DF["AST_diff"]["AST_row_n"] = [
int(i[-1]) for i in _DF["AST_diff"].AST_row.str.split("_").values
]
AST_diff = _DF["AST_diff"].copy()
AST_diff.loc[~AST_diff.AST_row_pre.isin(_drop_AST_row_pre)]
AST_matches = (
_DF["AST_matches"].copy().sort_values(by=["post_postAST", "SampleID"])
)
_rem1 = AST_matches.loc[
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.SampleID.isin(["JOS2", "JOS4", "JOS5"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 25))
].assign(**{"rem": 1})
_rem2 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (
AST_matches.SampleID.isin(
["JOS1", "JOS2", "JOS3", "JOS4", "JOS5"]
)
)
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 5, 6))
& (
AST_matches.post_PAR_date_day_dt.isin(
[dt.date(2019, 1, 25), dt.date(2019, 1, 26)]
)
)
)
].assign(**{"rem": 2})
# _keep_clean.loc[2].to_dict()
# _jos3 = {'SampleID': 'JOS3', 'pre_PAR_date_day_dt': dt.date(2019, 1, 24), 'post_PAR_date_day_dt': dt.date(2019, 1, 25),
# 'post_postAST': 'postAST_LC', 'pre_postAST': 'no'}
# _jos3qry = ' & '.join([f'{k} == {repr(val)}' for k,val in _jos3.items()])
# AST_matches.query(_jos3qry)
_rem3 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (AST_matches.SampleID.isin(["JOS3"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 24))
& (AST_matches.post_PAR_date_day_dt == dt.date(2019, 1, 25))
)
].assign(**{"rem": 3})
_rem4 = AST_matches.loc[(AST_matches.pre_postAST != "no")].assign(
**{"rem": 4}
)
_edit = _rem1 # changed later 15.03
_remove = pd.concat([_rem2, _rem4, _rem3])
_keep = AST_matches.iloc[~AST_matches.index.isin(_remove.index.values)]
AST_matches[_ch_match].drop_duplicates()
_rem_clean = _remove[_ch_match + ["rem"]].sort_values(by=_sortcols)
_keep_clean = _keep[_ch_match].sort_values(by=_sortcols)
# _remove[['SampleID','post_postAST']] # check
# _rem = _DF['AST_diff'].loc[_DF['AST_diff']['AST_row_n'].isin(_remove.index.values)]
# _rem[['SampleID','postAST_post','PAR_date_day_pre']] #check
_filtered = AST_diff.loc[~AST_diff["AST_row_n"].isin(_remove.index.values)]
# DF['AST_diff'] = _filtered
self.EC_merged_dict.update({_EC: {**_DF, **{"AST_diff_filter": _filtered}}})
print(
f'EC merged dict updated with dropped rows in "AST_diff_filter" for:\n {self.EC_merged_dict.keys()}'
)
# return EC_merged_dict
# _DF['AST_diff'].loc[_DF['AST_diff'].AST_row_n.isin(_rem]
def EC_merge_postchar(_reloadset=False):
_pkl_EC_postchar = "EC_merged_postchars"
EC_postchars = load_dict_pkl(_pkl_EC_postchar)
if not EC_postchars and _reloadset != True:
EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=True)
# EC_merged_dict_bak = EC_merged_dict.copy()
EC_merged_dict = EC_PorphSiO2.add_filter_selection_of_EC_merged(
EC_merged_dict
)
postChars = postChar().merged
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
EC_postchars = {}
for _EC, _DF_dict in EC_merged_dict.items():
_DF = _DF_dict["AST_diff_filter"]
_initcols = _DF.columns
_DF = _DF.dropna(axis=1, how="all")
_DF = _DF.drop(columns=_DF.columns.intersection(_extracols))
_DF = pd.merge(_DF, postChars, on="SampleID")
_postcols = _DF.columns
EC_postchars.update({_EC: _DF})
save_dict_pkl(_pkl_EC_postchar, EC_postchars)
return EC_postchars
def _fix_ORR_scans():
EC_postchars = EC_PorphSiO2.EC_merge_postchar(_reloadset=True)
_ORR = EC_postchars["ORR"]
_J245 = _ORR.loc[
_ORR.SampleID.isin(["JOS2", "JOS4", "JOS5"])
& (_ORR.postAST_post == "postAST_LC")
]
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
def compare_AST_pars(self, _DF, _AST_in, reload=False, extra="", save_pkl=True):
# _DF, _AST_in = EIS, EIS_AST
# _DF, _AST_in = N2CV, N2_AST
# _DF, _AST_in = ORR, ORR_AST
# _DF, _AST_in = KL, KL_AST
# _DF, _AST_in = HER, HER_AST
# _DF, _AST_in = Hgrp, HER_AST
# _DF, _AST_in = ttN2CV, ttAST
# reload, extra = _reloadset, Htype
_DF = _DF.drop_duplicates()
_DFtype = self.sense_DF_type(_DF)
_DFtype = "".join([i for i in _DFtype if str.isalpha(i)])
_DFtype_prefix = _DFtype.split("_")[0]
if extra:
_pklpath = EC_PorphSiO2.folder.joinpath(
f"AST_compared_pars_{_DFtype}_{extra}.pkl"
)
else:
_pklpath = EC_PorphSiO2.folder.joinpath(f"AST_compared_pars_{_DFtype}.pkl")
if _pklpath.exists() and not reload:
try:
print("AST compare reading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
return DF_diff
except Exception as e:
return print("reading error", e)
else:
_prec = [i for i in _AST_in.columns if not i.startswith("post")]
_precols = [
i.split("pre_")[-1] if i.startswith("pre") else i for i in _prec
]
_post = [i for i in _AST_in.columns if not i.startswith("pre")]
_postcols = [
i.split("post_")[-1] if i.startswith("post") else i for i in _post
]
_dropnacols = set(_post + _prec)
list(set(_prec).intersection(set(_post)))
_AST = _AST_in.dropna(subset=_dropnacols, how="any")
# _AST = _AST_in.loc[(_AST_in.SampleID == "JOS4") ]
# & (_AST_in.post_postAST.str.contains('LC'))]
_DF_diff_out = []
_errors = []
_dms = []
# _AST.loc[_AST.SampleID == "JOS4"].tail(2)
for n, r in _AST.iterrows():
# r[_dropnacols]
_uniq_AST_row_pre = f"{r.pre_PAR_date_day_dt};{Path(r.pre_PAR_file).stem};{r.pre_postAST};{int(r.pre_dupli_num)}"
_uniq_AST_row_post = f"{r.post_PAR_date_day_dt};{Path(r.post_PAR_file).stem};{r.post_postAST};{int(r.post_dupli_num)}"
# EIS.query(' and '.join([f'{k} == {repr(v)}' for k, v in _pred.items()]))
_pred = dict(zip(_precols, r[_prec].to_dict().values()))
_preQ = " & ".join(
[f"{k} == {repr(v)}" for k, v in _pred.items() if k in _DF.columns][
1:
]
)
_Dpre = _DF.query(_preQ).dropna(axis=1, how="all")
_postd = dict(zip(_postcols, r[_post].to_dict().values()))
_postQ = " & ".join(
[
f"{k} == {repr(v)}"
for k, v in _postd.items()
if k in _DF.columns
][1:]
)
_Dpos = _DF.query(_postQ).dropna(axis=1, how="all")
_dms.append((n, _pred, _postd))
# pd.merge(_Dpre,_Dpos)
_0 = [
(i, _Dpre[i].unique()[0])
for i in _Dpre.columns
if _Dpre[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
_1 = [
(i, _Dpos[i].unique()[0])
for i in _Dpos.columns
if _Dpos[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
# _dms.append((n, len(_Dm), _Dm ))
_mcols = [
i[0]
for i in set(_0).intersection(set(_1))
if not i[0].startswith("dupli")
]
_mcols = [
i
for i in _mcols
if i not in ["PAR_exp", "Dest_dir"] and not i.startswith("EXP_")
]
_mcols.sort()
_othercols = _Dpos.columns.difference(_mcols)
t2 = _Dpos[_othercols]
if "EIS" in _DFtype and all(
["E_RHE" in i for i in [_Dpre.columns, _Dpos.columns]]
):
_mcols += ["E_RHE"]
# _Dm = pd.merge(_Dpre,_Dpos,on=_mcols + ['E_RHE'],suffixes=['_pre','_post'])
elif "ORR" in _DFtype:
_KLcols = ["ORR_E_AppV_RHE", "ORR_KL_E_AppV_RHE", "Electrode"]
if all(i in _othercols for i in _KLcols):
_mcols += _KLcols
# _Dm = pd.merge(_Dpre, _Dpos, on = _mcols, suffixes = ['_pre','_post'])
elif "HER" in _DFtype:
_addcols = [
i
for i in [
"HER_type",
"HER_at_J_slice",
"HER_at_E_slice",
"HER_Segnum",
]
if i in set(_Dpre.columns).union(_Dpos.columns)
]
_mcols += _addcols
_Dm = pd.merge(_Dpre, _Dpos, on=_mcols, suffixes=["_pre", "_post"])
_Dm = _Dm.assign(
**{
"AST_row": f"{_DFtype}_{n}",
"AST_row_n": int(n),
"AST_days_n": r.AST_days_n,
"AST_row_pre": _uniq_AST_row_pre,
"AST_row_post": _uniq_AST_row_post,
}
)
# [(i, _Dpos[i].nunique(), _Dpos[i].unique()[0], _Dpre[i].nunique(), _Dpre[i].unique()[0], (_Dpos[i].unique(),_Dpre[i].unique()))
# for i in _mcols if _Dpos[i].nunique() > 1]
if _Dm.empty:
run_this  # undefined name: execution stops here if the pre/post merge came out empty
# try:
# _Dm = pd.merge_asof(_Dpre.sort_values(_mcols), _Dpos.sort_values(_mcols), on = _mcols, suffixes = ['_pre','_post'])
_parcols = [
(i, i.replace("_pre", "_post"))
for i in _Dm.columns
if i.startswith(_DFtype_prefix)
and i.endswith("_pre")
and i.replace("_pre", "_post") in _Dm.columns
]
for _c0, _c1 in _parcols:
try:
_diffabs = _Dm[_c0] - _Dm[_c1]
_diffperc = 100 * (_Dm[_c1] - _Dm[_c0]) / _Dm[_c0]
_Dm = _Dm.assign(
**{
_c0.split("_pre")[0] + "_diff_abs": _diffabs,
_c0.split("_pre")[0] + "_diff_perc": _diffperc,
}
)
except Exception as e:
# pass
_errors.append((_c0, _c1, e))
_DF_diff_out.append(_Dm)
# print(_c0, e)
DF_diff = pd.concat(_DF_diff_out).drop_duplicates()
if save_pkl == True:
DF_diff.to_pickle(_pklpath)
_logger.info(f"AST compare len({len(DF_diff)}) saved to:{_pklpath}")
return DF_diff
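# Note: for every matched parameter pair 'X_pre'/'X_post' the merged frame gains
# 'X_diff_abs' (pre - post) and 'X_diff_perc' (100 * (post - pre) / pre), e.g. the
# 'EIS_Rct_O2_diff_abs' column used in the plotting example below.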
# DF_diff.groupby(['postAST_post','SampleID']).plot(x='E_RHE', y='EIS_Rct_O2_diff_abs',ylim=(-200,200))
def sense_DF_type(self, _DF):
# _c = [i[0] for i in Counter([i.split('_')[0] for i in _DF.columns]).most_common(5) if i[0] not in ['BET','tM']][0]
_excl = set(self.EC_idx_PorphSiO2.columns).union(SampleCodes.columns)
_res = [
i
for i in Counter(
["_".join(i.split("_")[0:2]) for i in _DF.columns]
).most_common(20)
if not any([i[0] in b for b in _excl]) and i[0][0].isalnum()
]
_res2 = Counter(["_".join(i.split("_")[0:1]) for i, c in _res])
_type = _res2.most_common(1)[0][0]
_extraC = Counter(
["_".join(i.split("_")[1:2]) for i in _DF.columns if _type in i]
).most_common(1)
if _extraC[0][1] > 4:
_type = f"{_type}_{_extraC[0][0]}"
# if _res2.most_common(2)[1][1] > 3:
# _type = f'{_type}_{_res2.most_common(2)[1][0]}'
return _type
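# Illustrative sketch of the prefix inference above, with hypothetical column names:
# a frame with columns ['EIS_Rct_O2', 'EIS_Rs', 'EIS_Qad', 'SampleID'] yields
# _type == 'EIS'; a second-level token is only appended when it occurs more than
# four times (e.g. many 'N2_Cdl_*' columns would give 'N2_Cdl').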
# EC_all_merged_lst.append(EC_OHN_merged)
# EC_all_merged = pd.concat(EC_all_merged_lst)
# ORR_cath = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='cathodic')
# ORR_an = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='anodic')
# EC_OHN2 = pd.merge(template, pd.merge(ORR_an,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2_cath = pd.merge(template, pd.merge(ORR,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2.to_excel(FindExpFolder('PorphSiO2').compare.joinpath('EC_ORR_HPRR_N2.xlsx'))
def export_to_xls(EC_OHN_merged):
export_path = FindExpFolder("PorphSiO2").compare.joinpath(f"EC_pars_all.xlsx")
if "Sweep_Type" in EC_OHN_merged.columns:
with pd.ExcelWriter(export_path) as writer:
for swp, swpgr in EC_OHN_merged.groupby("Sweep_Type"):
swpgr.to_excel(writer, sheet_name=swp)
swpgr.to_excel(export_path.with_name(f"EC_pars_{swp}.xlsx"))
else:
export_path = FindExpFolder("PorphSiO2").compare.joinpath(
"EC_pars_no-sweep.xlsx"
)
EC_OHN_merged.to_excel(export_path)
print(f"EC pars saved to:\n{export_path}")
return export_path
def edit_columns(func, template=pd.concat([PorphSiO2_template(), SampleCodes])):
def wrapper(*args, **kwargs):
if kwargs:
pars_out, suffx = func(*args, **kwargs)
else:
pars_out, suffx = func(*args)
_skipcols = set(
EC_prepare_EC_merged.mcols
+ ["RPM_DAC_uni"]
+ list(PorphSiO2_template().columns)
+ list(EC_index.columns)
+ list(SampleCodes.columns)
)
cols = [
i
for i in pars_out.columns
if i not in _skipcols and not i.startswith(f"{suffx}")
]
pars_out = pars_out.rename(columns={i: f"{suffx}_" + i for i in cols})
return pars_out
return wrapper
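# Example of the renaming above (hypothetical column name): with suffx='ORR', a
# returned column 'J_diff_lim' becomes 'ORR_J_diff_lim', while template/index
# columns listed in _skipcols keep their original names.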
@edit_columns
def edit_pars_HPRR(sweep_type_select=["anodic", "cathodic"]):
hfs = []
for swp in sweep_type_select:
hprr_files = list(EC_PorphSiO2.folder.rglob(f"*{swp}*HPRR*disk*"))
# print(hprr_files)
for hf in hprr_files:
hprr_raw =
|
pd.read_excel(hf)
|
pandas.read_excel
|
"""
The :mod:`hillmaker.bydatetime` module includes functions for computing occupancy,
arrival, and departure statistics by time bin of day and date.
"""
# Copyright 2022 <NAME>
#
import logging
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import Series
from pandas import Timestamp
from datetime import datetime
from pandas.tseries.offsets import Minute
import hillmaker.hmlib as hmlib
CONST_FAKE_OCCWEIGHT_FIELDNAME = 'FakeOccWeightField'
CONST_FAKE_CATFIELD_NAME = 'FakeCatForTotals'
OCC_TOLERANCE = 0.02
# This should inherit level from root logger
logger = logging.getLogger(__name__)
def make_bydatetime(stops_df, infield, outfield,
start_analysis_np, end_analysis_np, catfield=None,
bin_size_minutes=60,
cat_to_exclude=None,
totals=1,
occ_weight_field=None,
edge_bins=1,
verbose=0):
"""
Create bydatetime table based on user inputs.
This is the table from which summary statistics can be computed.
Parameters
----------
stops_df: DataFrame
Stop data
infield: string
Name of column in stops_df to use as arrival datetime
outfield: string
Name of column in stops_df to use as departure datetime
start_analysis_np: numpy datetime64[ns]
Start date for the analysis
end_analysis_np: numpy datetime64[ns]
End date for the analysis
catfield : string, optional
Column name corresponding to the categories. If none is specified, then only overall occupancy is analyzed.
bin_size_minutes: int, default 60
Bin size in minutes. Should divide evenly into 1440.
cat_to_exclude: list of strings, default None
Categories to ignore
edge_bins: int, default 1
Occupancy contribution method for arrival and departure bins. 1=fractional, 2=whole bin
totals: int, default 1
0=no totals, 1=totals by datetime, 2=totals bydatetime as well as totals for each field in the
catfields (only relevant for > 1 category field)
occ_weight_field : string, optional
Column name corresponding to the weights to use for occupancy incrementing. If None, each stop contributes a weight of 1.0.
verbose : int, default 0
The verbosity level. The default, zero, means silent mode.
Returns
-------
Dict of DataFrames
Occupancy, arrivals, departures by category by datetime bin
Examples
--------
>>> bydt_dfs = make_bydatetime(stops_df, 'InTime', 'OutTime',
...                            datetime(2014, 3, 1), datetime(2014, 6, 30), 'PatientType', 60)
TODO
----
* Sanity checks on date ranges
* Formal test using short stay data
* Flow conservation checks
Notes
-----
References
----------
See Also
--------
"""
# Number of bins in analysis span
num_bins = hmlib.bin_of_span(end_analysis_np, start_analysis_np, bin_size_minutes) + 1
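# For example, assuming bin_of_span returns the zero-based bin index of the end
# timestamp, a two-day analysis span with 60-minute bins yields num_bins == 48.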
# Compute min and max of in and out times
min_intime = stops_df[infield].min()
max_intime = stops_df[infield].max()
min_outtime = stops_df[outfield].min()
max_outtime = stops_df[outfield].max()
logger.info(f"min of intime: {min_intime}")
logger.info(f"max of intime: {max_intime}")
logger.info(f"min of outtime: {min_outtime}")
logger.info(f"max of outtime: {max_outtime}")
# TODO - Add warnings here related to min and maxes out of whack with analysis range
# Occupancy weights
# If no occ weight field specified, create fake one containing 1.0 as values.
# Avoids having to check during dataframe iteration whether or not to use
# default occupancy weight.
if occ_weight_field is None:
occ_weight_vec = np.ones(len(stops_df.index), dtype=np.float64)
occ_weight_df = DataFrame({CONST_FAKE_OCCWEIGHT_FIELDNAME: occ_weight_vec})
stops_df = pd.concat([stops_df, occ_weight_df], axis=1)
occ_weight_field = CONST_FAKE_OCCWEIGHT_FIELDNAME
# Handle cases of no catfield or a single fieldname (no longer supporting a list of fieldnames)
# If no category, add a temporary dummy column populated with a totals str
total_str = 'total'
do_totals = True
if catfield is not None:
# If it's a string, it's a single cat field --> convert to list
# Keeping catfield as a list in case I change mind about multiple category fields
if isinstance(catfield, str):
catfield = [catfield]
else:
totlist = [total_str] * len(stops_df)
totseries =
|
Series(totlist, dtype=str, name=CONST_FAKE_CATFIELD_NAME)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from collections import Counter
from functools import partial
import scipy as sp
from scipy.stats import mode
from sklearn.ensemble import ExtraTreesRegressor
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
from ngboost.scores import MLE
from ngboost.learners import default_tree_learner
from ngboost import NGBRegressor
from mlxtend.regressor import StackingCVRegressor, LinearRegression
# pip install --upgrade git+https://github.com/stanfordmlgroup/ngboost.git
pd.set_option('display.max_columns', 1000)
SEED = 42
def read_data():
print('Reading train.csv file....')
train = pd.read_csv('train.csv')
print('Training.csv file have {} rows and {} columns'.format(train.shape[0], train.shape[1]))
print('Reading test.csv file....')
test = pd.read_csv('test.csv')
print('Test.csv file have {} rows and {} columns'.format(test.shape[0], test.shape[1]))
print('Reading train_labels.csv file....')
train_labels = pd.read_csv('train_labels.csv')
print('Train_labels.csv file have {} rows and {} columns'.format(train_labels.shape[0], train_labels.shape[1]))
print('Reading specs.csv file....')
specs = pd.read_csv('specs.csv')
print('Specs.csv file have {} rows and {} columns'.format(specs.shape[0], specs.shape[1]))
print('Reading sample_submission.csv file....')
sample_submission = pd.read_csv('sample_submission.csv')
print('Sample_submission.csv file have {} rows and {} columns'.format(sample_submission.shape[0], sample_submission.shape[1]))
return train, test, train_labels, specs, sample_submission
def encode_title(train, test, train_labels):
# encode title
train['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), train['title'], train['event_code']))
test['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), test['title'], test['event_code']))
all_title_event_code = list(set(train["title_event_code"].unique()).union(test["title_event_code"].unique()))
# make a list with all the unique 'titles' from the train and test set
list_of_user_activities = list(set(train['title'].unique()).union(set(test['title'].unique())))
# make a list with all the unique 'event_code' from the train and test set
list_of_event_code = list(set(train['event_code'].unique()).union(set(test['event_code'].unique())))
list_of_event_id = list(set(train['event_id'].unique()).union(set(test['event_id'].unique())))
# make a list with all the unique worlds from the train and test set
list_of_worlds = list(set(train['world'].unique()).union(set(test['world'].unique())))
# create a dictionary numerating the titles
activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))
activities_labels = dict(zip(np.arange(len(list_of_user_activities)), list_of_user_activities))
activities_world = dict(zip(list_of_worlds, np.arange(len(list_of_worlds))))
assess_titles = list(set(train[train['type'] == 'Assessment']['title'].value_counts().index).union(set(test[test['type'] == 'Assessment']['title'].value_counts().index)))
# replace the text titles with the number titles from the dict
train['title'] = train['title'].map(activities_map)
test['title'] = test['title'].map(activities_map)
train['world'] = train['world'].map(activities_world)
test['world'] = test['world'].map(activities_world)
train_labels['title'] = train_labels['title'].map(activities_map)
win_code = dict(zip(activities_map.values(), (4100*np.ones(len(activities_map))).astype('int')))
# then, it set one element, the 'Bird Measurer (Assessment)' as 4110, 10 more than the rest
win_code[activities_map['Bird Measurer (Assessment)']] = 4110
# convert text into datetime
train['timestamp'] =
|
pd.to_datetime(train['timestamp'])
|
pandas.to_datetime
|
import dash
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import dash_core_components as dcc
import dash_html_components as html
from plotly.subplots import make_subplots
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
x = [str(a) for a in np.arange(1,53)]
# ETC
# y = [82.78, 84.33, 86.08, 82.71, 83.55, 82.22, 83.97, 81.59, 86.38,86.18, 82.47, 87.54, 84.06, 83.90, 84.38, 83.04, 86.66, 87.32, 83.21, 87.66, 83.59, 85.26, 84.22, 86.05,
# 85.16,86.31, 86.05,85.61, 86.00, 85.92, 86.63, 85.50, 85.32, 84.95, 86.07, 85.21, 84.54, 84.96, 84.50, 86.65, 84.12, 85.15, 84.59, 85.27, 83.89, 85.99, 85.32, 85.23, 84.48, 84.09, 84.93, 84.12]
# PCA
# y = [82.76, 84.67, 82.34, 82.91, 82.37, 83.25, 82.81, 82.59, 82.05, 82.21, 83.01, 83.04, 82.69, 84.20, 84.29, 83.72, 83.58, 82.79, 83.11, 82.25, 82.58, 83.93, 83.69, 83.50,
# 83.43, 82.44, 83.78, 82.39, 85.15, 83.20, 85.28, 83.06, 82.73, 83.17, 84.63, 83.99, 83.60, 83.54, 85.71, 84.31, 84.30, 83.98, 84.33, 84.84, 84.46, 85.53, 82.07, 82.15, 82.19, 82.14, 82.17, 82.19 ] + [0]*0
# ETC
# x = [1,5,10,15,20,25,30,35,40,45]
# y = [73.51, 100, 100, 100, 100, 99, 100, 100, 100, 98]
# ETC simplified
x = [1,2,3,4,5]
y = [83.20,75.23,76.54,100,100]
# PCA
# xx = [1,5,10,15,20,25,30,35,40,45]
# yy = [75.49, 100, 90.86, 93.93, 99.68, 100, 100, 100, 100, 100]
# PCA simplified
# x = [36,37,38,39,40]
# y = [83.90,0,0,0,0,0]
# ETC with PCA
# x = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
# y = [71.22, 60.84, 85.84, 85.28, 88.92, 83.73, 83.77, 86.63, 86.03, 83.70, 83.88, 83.30, 83.18, 82.88, 83.93, 85.07, 80.16, 86.83,83.02,84.99]
data = {'NrFeatures': x, 'F1Score': y}
df =
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
import json
import random
import numpy as np
import os
import glob
import pandas as pd
high = ['State']
med = ['AreaCode', 'HasChild', 'SingleExemp', 'Zip']
low = ['MaritalStatus', 'ChildExemp', 'City']
def holoclean_test_gen(dataset_path, report_path):
# read tax dataset
dataset = pd.read_csv(dataset_path)
tuple_num = dataset.shape[0]
print(tuple_num)
extension = 'csv'
all_filenames = glob.glob(os.path.join(report_path, '*.{}'.format(extension)))
combined_report = pd.concat([
|
pd.read_csv(f, sep='\s*;\s*', error_bad_lines=False)
|
pandas.read_csv
|
import sys
import time
import math
import warnings
import numpy as np
import pandas as pd
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from fmlc.triggering import triggering
from fmlc.baseclasses import eFMU
from fmlc.stackedclasses import controller_stack
class testcontroller1(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init= False
self.output['c'] = self.input['a'] * self.input['b']
return 'testcontroller1 did a computation!'
class testcontroller2(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(0.2)
return 'testcontroller2 did a computation!'
class testcontroller3(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(1)
return 'testcontroller3 did a computation!'
class testcontroller4(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(10)
return 'testcontroller4 did a computation!'
def test_sampletime():
'''This tests if the sample time is working properly'''
controller = {}
controller['forecast1'] = {'function': testcontroller1, 'sampletime': 3}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True)
now = time.time()
while time.time() - now < 10:
controller.query_control(time.time())
df = pd.DataFrame(controller.log_to_df()['forecast1'])
assert df.shape[0] == 5
for i in (np.diff(df.index) / np.timedelta64(1, 's'))[1:]:
assert(math.isclose(i, 3, rel_tol=0.01))
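# With a 3 s sample time queried over a 10 s window the controller fires at
# t ~ 0, 3, 6, 9 s; together with the 'Initialize' record that gives the five rows
# and ~3 s spacing asserted above.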
def test_normal():
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller2, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller3, 'sampletime':2}
controller['forecast3'] = {'function':testcontroller1, 'sampletime': 1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2, workers=100)
controller.run_query_control_for(5)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 7
assert df2.shape[0] == 4
assert df3.shape[0] == 7
assert df4.shape[0] == 7
assert df5.shape[0] == 7
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df2['a'][0])
assert pd.isna(df2['b'][0])
assert pd.isna(df2['c'][0])
assert pd.isna(df3['a'][0])
assert pd.isna(df3['b'][0])
assert pd.isna(df3['c'][0])
assert pd.isna(df4['a'][0])
assert pd.isna(df4['b'][0])
assert pd.isna(df4['c'][0])
assert pd.isna(df5['a'][0])
assert pd.isna(df5['b'][0])
assert pd.isna(df5['c'][0])
assert list(df1['a'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df1['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df1['c'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df2['a'])[1:] == [20.0, 20.0, 20.0]
assert list(df2['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df2['c'])[1:] == [80.0, 80.0, 80.0]
assert list(df3['a'])[1:] == [30.0, 30.0, 30.0, 30.0, 30.0, 30.0]
assert list(df3['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df3['c'])[1:] == [120.0, 120.0, 120.0, 120.0, 120.0, 120.0]
assert list(df4['a'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df4['b'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df4['c'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['a'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['b'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df5['c'])[1:] == [16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]
assert list(df1['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df2['logging']) == ['Initialize', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!']
assert list(df3['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df4['logging']) == ['Initialize', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!']
assert list(df5['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
def test_stuckController():
'''This tests that controllers which time out are caught'''
## CASE1: mpc1 stuck
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller4, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller1, 'sampletime':1}
controller['forecast3'] = {'function':testcontroller1, 'sampletime':1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=0.5, workers=100)
# Catch warning.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
controller.run_query_control_for(2)
assert len(w) == 3
assert "timeout" in str(w[-1].message)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 4
assert df2.shape[0] == 4
assert df3.shape[0] == 4
#assert df4.shape[0] == 1
assert df5.shape[0] == 1
#assert len(df4.columns) == 1
assert len(df5.columns) == 1
# Check contents of records
assert
|
pd.isna(df1['a'][0])
|
pandas.isna
|
#!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries to use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import mysql.connector
# Cursor/connection settings for reading the database
with open('conf.json', 'r') as fd:
conf = json.load(fd)
src_db = mysql.connector.connect(**conf['src_db'])
# Some constants
riskFreeRate = 0.02 # risk-free rate
varThreshold = 0.05 # 5% VaR threshold
scaleParameter = 50 # 50 weeks per year
# Table names
index_data_table = 'fund_weekly_index' # index time-series data
index_name_table = 'index_id_name_mapping'
type_index_table = 'index_stype_code_mapping' # table name - index mapped to each fund category
# Mapping of private-fund index categories (only needs to run once)
def get_type_index_table(tableName = type_index_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('stype_code',inplace=True)
return pdResult
# Mapping of private-fund index names and IDs (only needs to run once)
def get_index_table(tableName = index_name_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('index_id',inplace=True)
return pdResult
# Net-asset-value time series of a private-fund index
def get_index(index, tableName=index_data_table):
try:
# sql_query='select id,name from student where age > %s'
cursor = src_db.cursor()
sql = "select index_id,statistic_date,index_value from %s where index_id = '%s'" % (tableName, index)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
pdResult = pd.DataFrame(result, dtype=float)
pdResult.columns = ['index', 'date', 'net_worth']
pdResult = pdResult.drop_duplicates().set_index('date')
pdResult = pdResult.dropna(axis=0)
pdResult = pdResult.fillna(method='ffill')
return pdResult
# Classify by quarter
def byseasons(x):
if 1<=x.month<=3:
return str(x.year)+'_'+str(1)
elif 4<= x.month <=6:
return str(x.year)+'_'+str(2)
elif 7<= x.month <=9:
return str(x.year)+'_'+str(3)
else:
return str(x.year)+'_'+str(4)
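# e.g. byseasons(pd.Timestamp('2017-05-12')) -> '2017_2' (May falls in quarter 2)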
# Compute the max drawdown and its start/end dates
def cal_max_dd_indicator(networthSeries):
maxdd = pd.DataFrame(index = networthSeries.index, data=None, columns =['max_dd','max_dd_start_date','max_dd_end_date'],dtype = float)
maxdd.iloc[0] = 0
maxdd.is_copy = False
for date in networthSeries.index[1:]:
maxdd.loc[date] = [1 - networthSeries.loc[date] / networthSeries.loc[:date].max(),networthSeries.loc[:date].idxmax(),date]
#maxdd[['max_dd_start_date','max_dd_end_date']].loc[date] = [[networthSeries.loc[:date].idxmax(),date]]
#maxdd['max_dd_start_date'].loc[date] = networthSeries.loc[:date].idxmax()
return maxdd['max_dd'].max(), maxdd.loc[maxdd['max_dd'].idxmax()]['max_dd_start_date'], maxdd.loc[maxdd['max_dd'].idxmax()]['max_dd_end_date']
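# Minimal self-check of the drawdown definition used above (toy numbers, not from
# the database); an illustrative sketch only:
def _max_dd_example():
    nav = pd.Series([1.0, 1.2, 0.9, 1.1])
    drawdown = 1 - nav / nav.cummax()   # distance below the running peak
    # the worst drawdown here is 1 - 0.9/1.2 = 0.25
    return drawdown.max()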
# Compute max drawdown per quarter; input and output are both DataFrames
def cal_maxdd_by_season(df):
seasonList = sorted(list(set(df['season'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['season'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'season'
return maxdd_df
# Compute max drawdown per year; input and output are both DataFrames
def cal_maxdd_by_year(df):
seasonList = sorted(list(set(df['year'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['year'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'year'
return maxdd_df
# Prepare the raw data DataFrame
def get_count_data(cnx):
cursor = cnx.cursor()
sql = "select fund_id,foundation_date,fund_type_strategy from fund_info"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['fund_id', 'found_date', 'strategy']
sql = "select type_id, strategy from index_type_mapping"
cursor.execute(sql)
result = cursor.fetchall()
meg = pd.DataFrame(result)
meg.columns = ['type_id', 'strategy']
# Data cleaning
df = df.dropna()
df = df[df['strategy'] != u'']
# Merge with the mapping table
df = pd.merge(df, meg)
# Add a year column
df['year'] = [str(i.year) for i in df['found_date']]
# Add a month column
df['month'] = [str(i.year) + '_' + str(i.month) for i in df['found_date']]
return df.drop('strategy', axis=1)
# Get counts grouped by year; returns a DataFrame
def get_ann_fund(df):
temp = df.groupby(['type_id', 'year'])['fund_id'].count().to_frame() # grouped counts as a DataFrame
temp = pd.pivot_table(temp, values='fund_id', index='year', columns=['type_id'])
temp['Type_0'] = df.groupby(['year'])['fund_id'].count().to_frame()['fund_id'] # add whole-market totals
temp.sort_index(axis=0)
temp.sort_index(axis=1, inplace=True)
return temp
# Get counts grouped by month; returns a DataFrame
def get_month_fund(df):
temp = df.groupby(['type_id', 'month'])['fund_id'].count().to_frame() # grouped counts as a DataFrame
temp = pd.pivot_table(temp, values='fund_id', index=['month'], columns=['type_id'])
temp['Type_0'] = df.groupby(['month'])['fund_id'].count().to_frame()['fund_id'] # add whole-market totals
temp.sort_index(axis=0)
temp.sort_index(axis=1, inplace=True)
return temp
# Prepare the raw data DataFrame
def get_org_count(cnx):
cursor = cnx.cursor()
sql = "SELECT org_id, found_date FROM PrivateEquityFund.org_info WHERE org_category LIKE '4%'"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['org_id', 'found_date']
# Data cleaning
df = df.dropna()
# Add a year column
df['year'] = [str(i.year) for i in df['found_date']]
# Add a month column
df['month'] = [str(i.year) + '_0' + str(i.month) if i.month < 10 else str(i.year) + '_' + str(i.month) for i in
df['found_date']]
return df
# Get counts grouped by year; returns a DataFrame
def get_ann_org(df):
temp = df.groupby(['year'])['org_id'].count().to_frame() # grouped counts as a DataFrame
temp.sort_index(axis=0)
return temp
# Get counts grouped by month; returns a DataFrame
def get_month_org(df):
temp = df.groupby(['month'])['org_id'].count().to_frame() # grouped counts as a DataFrame
temp.sort_index(axis=0)
return temp
if __name__ == '__main__':
# Compute quarterly indicators
maxddbyseason = pd.DataFrame() # quarterly max drawdown
retbyseason = pd.DataFrame() # quarterly return
stdbyseason = pd.DataFrame() # quarterly standard deviation
sharpebyseason =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
test_charts.py
Tests for the charts.py module.
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
import matplotlib
import pandas as pd
from hydrofunctions import charts
import hydrofunctions as hf
from .fixtures import JSON15min2day as test_json
dummy = {"col1": [1, 2, 3, 38, 23, 1, 19], "col2": [3, 4, 45, 23, 2, 4, 76]}
class TestFlowDuration(unittest.TestCase):
def test_charts_flowduration_exists(self):
expected = pd.DataFrame(data=dummy)
actual_fig, actual_ax = charts.flow_duration(expected)
self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
self.assertIsInstance(actual_ax, matplotlib.axes.Axes)
def test_charts_flowduration_defaults(self):
expected = pd.DataFrame(data=dummy)
actual_fig, actual_ax = charts.flow_duration(expected)
actual_xscale = actual_ax.xaxis.get_scale()
actual_yscale = actual_ax.yaxis.get_scale()
actual_ylabel = actual_ax.yaxis.get_label_text()
actual_marker = actual_ax.get_lines()[0].get_marker()
actual_legend = actual_ax.get_legend()
actual_legend_loc = actual_legend._loc
actual_title = actual_ax.get_title()
self.assertEqual(actual_xscale, "logit")
self.assertEqual(actual_yscale, "log")
self.assertEqual(actual_ylabel, "Stream Discharge (m³/s)")
self.assertEqual(actual_marker, ".")
self.assertTrue(actual_legend)
self.assertEqual(actual_legend_loc, 0) # '0' is internal code for 'best'.
self.assertEqual(actual_title, "")
def test_charts_flowduration_accepts_params(self):
expected =
|
pd.DataFrame(data=dummy)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import copy as cp
import geopandas as gpd
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
dict_cat = 'category_8'
cat_dict = pd.read_excel(wd + '/data/processed/LCFS/Meta/lcfs_desc_anne&john.xlsx')
cat_list = cat_dict[[dict_cat]].drop_duplicates()[dict_cat].tolist()
cat_list.remove('other')
# set font globally
plt.rcParams.update({'font.family':'Times New Roman'})
# load region and 2001 to 2011 lookup
lookup = pd.read_csv(wd + 'data/raw/Geography/Conversion_Lookups/UK_full_lookup_2001_to_2011.csv')\
[['MSOA11CD', 'MSOA01CD', 'RGN11NM']].drop_duplicates()
ew_shp = gpd.read_file(wd + 'data/raw/Geography/Shapefiles/EnglandWales/msoa_2011_ew.shp')\
.set_index('msoa11cd').join(lookup.set_index('MSOA11CD'), how='left')
lon_shp = ew_shp.loc[ew_shp['RGN11NM'] == 'London']
emissions = {}
for year in years:
year_difference = years[1] - years[0]
year_str = str(year) + '-' + str(year + year_difference - 1)
emissions[year] =
|
pd.read_csv(wd + 'data/processed/GHG_Estimates/' + geog + '_' + year_str + '.csv', index_col=0)
|
pandas.read_csv
|
import os
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_file_path, categories_file_path):
"""Load the data from the messages and categories CSV files and concatenate them into a data frame.
Parameters:
messages_file_path (str): The path of the CSV file that contains the messages.
categories_file_path (str): The path of the CSV file that contains the categories.
Returns:
df (pandas.core.frame.DataFrame): Concatenated data frame from messages and categories.
Example:
df = load_data('disaster_messages.csv', 'disaster_categories.csv')
"""
messages = pd.read_csv(messages_file_path)
categories = pd.read_csv(categories_file_path)
df = messages.join(categories.set_index('id'), on='id')
return df
def clean_data(df):
"""Split the categories column into multiple ones and drop duplicates from the data frame.
Parameters:
df (pandas.core.frame.DataFrame): The data frame to be cleaned.
Returns:
df (pandas.core.frame.DataFrame): Cleaned data frame.
"""
categories = df['categories'].str.split(';', expand=True)
row = categories.iloc[0, ]
category_col_names = list(map(lambda x: x[0:len(x) - 2], row))
categories.columns = category_col_names
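# e.g. a raw entry like 'related-1;request-0' splits into columns named 'related'
# and 'request' (each name drops the trailing two characters), and the loop below
# keeps only the final character as the numeric value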
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].astype('string').str[-1]
# convert column from string to numeric
categories[column] =
|
pd.to_numeric(categories[column])
|
pandas.to_numeric
|
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([
|
Timedelta(1, unit="d")
|
pandas.Timedelta
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
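# e.g. TestSetitemCoercion (method='setitem') must define test_setitem_series_int64,
# test_setitem_index_object, ... for every klass/dtype combination listed above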
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but place here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
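    # Illustrative note (added, not part of the original test suite): ``fillna`` follows
    # the same upcasting rules tested below, e.g. filling a float64 Series with a complex
    # value: pd.Series([1.1, np.nan]).fillna(1 + 1j) produces dtype complex128.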
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
        exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])  # api: pandas.Series
# coding: utf-8
# In[30]:
import csv
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import time
# In[31]:
import warnings
warnings.filterwarnings("ignore")
# # create binary datasets
# In[32]:
def folder(f_name): #this function creates a folder.
try:
if not os.path.exists(f_name):
os.makedirs(f_name)
except OSError:
print ("The folder could not be created!")
# In[33]:
def target_name(name):
    df = pd.read_csv(name, usecols=["Label"])  # api: pandas.read_csv
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
# ### We will try the following models (the tuning loop they share is sketched right after this list)
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
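# Illustration (added, not in the original notebook): every model below is handled with
# the same pattern -- RandomizedSearchCV over its parameter space, refit on the best
# parameters, predict_proba on the test set, then save_submission. A hedged sketch of
# that shared loop; it assumes the estimator exposes predict_proba after fitting (for
# LinearSVC the notebook wraps the model in CalibratedClassifierCV to get this).
def tune_fit_and_submit(estimator, param_dist, X, y, X_test, filename, n_iter=10):
    '''
    Sketch of the search/fit/submit pattern repeated for each model in this notebook.
    '''
    search = RandomizedSearchCV(estimator, param_dist, random_state=random_state,
                                cv=cv, verbose=verbose, n_iter=n_iter,
                                scoring=scoring, n_jobs=-1)
    search.fit(X, y)
    best = search.best_estimator_  # refit on the full training data (refit=True default)
    predictions = best.predict_proba(X_test)[:, 1]
    save_submission(predictions, filename)
    return search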
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# 
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# 
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# 
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[35]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[36]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_data,y_train)
# In[37]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# ## Features Observations:
#
# 1. MGR_ID is the most important feature followed by RESOURCE and ROLE_DEPTNAME
# In[38]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'rf_raw.csv')
# 
# ## 1.5 Xgboost with Raw Feature
# In[39]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_data,y_train)
# In[40]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[41]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[42]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_data,y_train)
# In[43]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[44]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'xgb_raw.csv')
# 
# 
# In[45]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','Raw', 0.67224, 0.68148])
x.add_row(['SVM', 'Raw', 0.50286, 0.51390])
x.add_row(['Logistic Regression', 'Raw', 0.53857, 0.53034])
x.add_row(['Random Forest', 'Raw', 0.87269, 0.87567])
x.add_row(['Xgboost', 'Raw', 0.86988, 0.87909])
print(x)
# # Observations:
#
# # 1. Xgboost performs best on the raw features
# # 2. Random forest also performs well on raw features
# # 3. Tree-based models perform better than linear models on raw features
# ## Build model on one hot encoded features
# ### 2.1 KNN with one hot encoded features
# In[46]:
train_ohe = sparse.load_npz('data/train_ohe.npz')
test_ohe = sparse.load_npz('data/test_ohe.npz')
train_ohe.shape, test_ohe.shape, y_train.shape
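# Illustration (added, not in the original notebook): train_ohe.npz / test_ohe.npz are
# produced elsewhere; a minimal sketch of how such files could be built, assuming
# scikit-learn's OneHotEncoder -- only the .npz file names come from this script, the
# encoder choice and everything else here is an assumption.
def build_ohe_files(train_raw, test_raw):
    from sklearn.preprocessing import OneHotEncoder
    ohe = OneHotEncoder(handle_unknown='ignore')
    train_sparse = ohe.fit_transform(train_raw)  # scipy sparse matrix
    test_sparse = ohe.transform(test_raw)
    sparse.save_npz('data/train_ohe.npz', train_sparse)
    sparse.save_npz('data/test_ohe.npz', test_sparse)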
# In[47]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=4)
best_model = clf.fit(train_ohe,y_train)
# In[48]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[49]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[50]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[51]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[52]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, "knn_ohe.csv")
# 
# ## 2.2 SVM with one hot encoded features
# In[53]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[54]:
best_c=best_model.best_params_['C']
best_c
# In[55]:
results = pd.DataFrame.from_dict(best_model.cv_results_)  # api: pandas.DataFrame.from_dict
import pandas as pd
import time
# Need to open original file, filter out non class1
phospho_file = input('Enter phospho filepath: (default: Phospho (STY)Sites.txt) ') or 'Phospho (STY)Sites.txt'
PAF_dataset_file = input('Enter dbPAF phosphosite dataset: (default: RAT.elm) ') or 'RAT.elm'
localiation_cutoff = float(input('Enter Localization prob cutoff: (default: .75) ') or .75)
if phospho_file.endswith('.txt'):
phospho_df = pd.read_table(phospho_file, dtype=object)
elif phospho_file.endswith('.xlsx'):
phospho_df = pd.read_excel(phospho_file)
elif phospho_file.endswith('.csv'):
phospho_df = pd.read_csv(phospho_file)
else:
raise Exception('Please use tab-delimited (.txt), .xlsx or .csv')
if PAF_dataset_file.endswith('.txt'):
PAF_df = pd.read_table(PAF_dataset_file, dtype=object)
elif PAF_dataset_file.endswith('.xlsx'):
PAF_df = pd.read_excel(PAF_dataset_file)
elif PAF_dataset_file.endswith('.csv'):
    PAF_df = pd.read_csv(PAF_dataset_file)  # api: pandas.read_csv
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([1, 3, 2])
exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series(["a", "b", "c"])
exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = Series([10, 11], dtype="category")
s2 = Series([np.nan, np.nan, np.nan])
exp = Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
s3 = Series([1, 2, 1, 2, np.nan])
exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([1, 3, 4])
exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
        # values are not in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([10, 11, 12])
exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
s1 = Series([1, 3], dtype="category")
s2 = Series([3, 4], dtype="category")
s3 = Series([2, 3])
s4 = Series([2, 2], dtype="category")
s5 = Series([1, np.nan])
s6 = Series([1, 3, 2], dtype="category")
# mixed dtype, values are all in categories => not-category
exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1._append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6._append([s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concat_categorical_ordered(self):
# GH 13524
s1 = Series(Categorical([1, 2, np.nan], ordered=True))
s2 = Series(Categorical([2, 1, 2], ordered=True))
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
        exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))  # api: pandas.Categorical
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
        if self.data is not None:
self.dates = self.check_dates()
        elif dates is not None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
            if var not in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
        if 'WS' in variables and 'WS' not in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
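                    # Note (added): the block above derives wind speed as the magnitude of the
                    # (Uwind, Vwind) vector and wind direction via arctan2(U, V); r2d = 45/arctan(1)
                    # = 180/pi converts radians to degrees, and the +180 shift yields the
                    # meteorological convention (direction the wind blows from).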
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
                            date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
                                    inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]  # api: pandas.DateOffset
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 17:29:16 2018
@author: jdkern
"""
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def exchange(year):
df_data = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Load_Path_Sim.csv',header=0)
c = ['Path66_sim','Path46_sim','Path61_sim','Path42_sim','Path24_sim','Path45_sim']
df_data = df_data[c]
paths = ['Path66','Path46','Path61','Path42','Path24','Path45']
df_data.columns = paths
df_data = df_data.loc[year*365:year*365+364,:]
    # select dispatchable imports (positive flow days)
imports = df_data
imports = imports.reset_index()
for p in paths:
for i in range(0,len(imports)):
if p == 'Path42':
if imports.loc[i,p] >= 0:
imports.loc[i,p] = 0
else:
imports.loc[i,p] = -imports.loc[i,p]
elif p == 'Path46':
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
else:
imports.loc[i,p] = imports.loc[i,p]*.404 + 424
else:
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
imports.rename(columns={'Path46':'Path46_SCE'}, inplace=True)
imports.to_csv('Path_setup/CA_imports.csv')
# convert to minimum flow time series and dispatchable (daily)
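    # Worked example (added for clarity): if a path's profile minimum for a day is at
    # least that day's import (e.g. minimum 800 vs import 500), the minimum is capped at
    # the import and nothing is left to dispatch; otherwise (e.g. minimum 200 vs import
    # 500) the 200 flows as must-take and the remaining 300 becomes dispatchable,
    # scaled by 24 in dispatchable_imports below.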
df_mins = pd.read_excel('Path_setup/CA_imports_minflow_profiles.xlsx',header=0)
lines = ['Path66','Path46_SCE','Path61','Path42']
for i in range(0,len(df_data)):
for L in lines:
if df_mins.loc[i,L] >= imports.loc[i,L]:
df_mins.loc[i,L] = imports.loc[i,L]
imports.loc[i,L] = 0
else:
imports.loc[i,L] = np.max((0,imports.loc[i,L]-df_mins.loc[i,L]))
dispatchable_imports = imports*24
dispatchable_imports.to_csv('Path_setup/CA_dispatchable_imports.csv')
df_data = pd.read_csv('Path_setup/CA_imports.csv',header=0)
# hourly minimum flow for paths
hourly = np.zeros((8760,len(lines)))
for i in range(0,365):
for L in lines:
index = lines.index(L)
hourly[i*24:i*24+24,index] = np.min((df_mins.loc[i,L], df_data.loc[i,L]))
H = pd.DataFrame(hourly)
H.columns = ['Path66','Path46_SCE','Path61','Path42']
H.to_csv('Path_setup/CA_path_mins.csv')
# hourly exports
df_data = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Load_Path_Sim.csv',header=0)
c = ['Path66_sim','Path46_sim','Path61_sim','Path42_sim','Path24_sim','Path45_sim']
df_data = df_data[c]
df_data.columns = [paths]
df_data = df_data.loc[year*365:year*365+364,:]
df_data = df_data.reset_index()
e = np.zeros((8760,4))
#Path 42
path_profiles = pd.read_excel('Path_setup/CA_path_export_profiles.xlsx',sheet_name='Path42',header=None)
pp = path_profiles.values
for i in range(0,len(df_data)):
if df_data.loc[i,'Path42'].values > 0:
e[i*24:i*24+24,0] = pp[i,:]*df_data.loc[i,'Path42'].values
#Path 24
path_profiles = pd.read_excel('Path_setup/CA_path_export_profiles.xlsx',sheet_name='Path24',header=None)
pp = path_profiles.values
for i in range(0,len(df_data)):
if df_data.loc[i,'Path24'].values < 0:
e[i*24:i*24+24,1] = pp[i,:]*df_data.loc[i,'Path24'].values*-1
#Path 45
path_profiles = pd.read_excel('Path_setup/CA_path_export_profiles.xlsx',sheet_name='Path45',header=None)
pp = path_profiles.values
for i in range(0,len(df_data)):
if df_data.loc[i,'Path45'].values < 0:
e[i*24:i*24+24,2] = pp[i,:]*df_data.loc[i,'Path45'].values*-1
#Path 66
path_profiles = pd.read_excel('Path_setup/CA_path_export_profiles.xlsx',sheet_name='Path66',header=None)
pp = path_profiles.values
for i in range(0,len(df_data)):
if df_data.loc[i,'Path66'].values < 0:
            e[i*24:i*24+24,3] = pp[i,:]*df_data.loc[i,'Path66'].values*-1  # column 3 corresponds to 'Path66' in exports
e = e*24
exports = pd.DataFrame(e)
exports.columns = ['Path42','Path24','Path45','Path66']
exports.to_csv('Path_setup/CA_exports.csv')
# HYDRO
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
# convert to minimum flow time series and dispatchable (daily)
df_PGE= pd.read_csv('../Stochastic_engine/CA_hydropower/PGE_valley_hydro.csv',header=0,index_col=0)
df_SCE= pd.read_csv('../Stochastic_engine/CA_hydropower/SCE_hydro.csv',header=0,index_col=0)
PGE_hydro = df_PGE.loc[year*365:year*365+364,:]
SCE_hydro = df_SCE.loc[year*365:year*365+364,:]
PGE_hydro = PGE_hydro.reset_index(drop=True)
SCE_hydro = SCE_hydro.reset_index(drop=True)
PGE_hydro=PGE_hydro.values*(1/.837)
SCE_hydro=SCE_hydro.values*(1/.8016)
df_mins = pd.read_excel('Hydro_setup/Minimum_hydro_profiles.xlsx',header=0)
for i in range(0,len(PGE_hydro)):
for fd in forecast_days:
fd_index = forecast_days.index(fd)
if df_mins.loc[i,'PGE_valley']*24 >= PGE_hydro[i,fd_index]:
df_mins.loc[i,'PGE_valley'] = PGE_hydro[i,fd_index]/24
PGE_hydro[i,fd_index] = 0
else:
PGE_hydro[i,fd_index] = np.max((0,PGE_hydro[i,fd_index]-df_mins.loc[i,'PGE_valley']*24))
if df_mins.loc[i,'SCE']*24 >= SCE_hydro[i,fd_index]:
df_mins.loc[i,'SCE'] = SCE_hydro[i,fd_index]/24
SCE_hydro[i,fd_index] = 0
else:
SCE_hydro[i,fd_index] = np.max((0,SCE_hydro[i,fd_index]-df_mins.loc[i,'SCE']*24))
dispatchable_PGE = pd.DataFrame(PGE_hydro)
dispatchable_PGE.columns = forecast_days
dispatchable_PGE.to_csv('Hydro_setup/CA_dispatchable_PGE.csv')
dispatchable_SCE =
|
pd.DataFrame(SCE_hydro)
|
pandas.DataFrame
|
###############################################################################
# title: 04-predictive-model.py
# created on: May 13, 2021
# summary: using information from threads to predict high volatility
###############################################################################
import pandas as pd
import numpy as np
import datetime
import math
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# idea: can we use information from the threads and topics to predict
# whether the thread was posted when BTC had high volatility?
###############################
# get 30-day rolling volatility
# from Jasper's notebook :D
###############################
btc = pd.read_csv('bpi.csv')
Day = btc['Date']
def str_to_time(elem):
day = datetime.datetime.strptime(elem, '%Y-%m-%d')
return day
btc['Date'] = btc['Date'].apply(str_to_time)
btc = btc.set_index('Date')
ch_btc = [math.nan]
ch_btc_pct = [math.nan]
for i in range(1, len(btc['BPI'])):
ch_btc.append(btc['BPI'].iloc[i]-btc['BPI'].iloc[i-1])
ch_btc_pct.append((btc['BPI'].iloc[i]/btc['BPI'].iloc[i-1])-1)
vola = pd.DataFrame(ch_btc_pct).rolling(30).std()*np.sqrt(30)
vola.index = btc.index
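# As a cross-check (a minimal sketch, not part of the original notebook; assumes btc['BPI']
# is available as loaded above): the loop-based calculation is equivalent to the vectorized
# pandas version below. pct_change() computes (BPI_t / BPI_{t-1}) - 1, matching ch_btc_pct.
vola_check = btc['BPI'].pct_change().rolling(30).std() * np.sqrt(30)
# vola_check should agree with vola[0] up to floating-point error.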
########################
# define high volatility
########################
# high volatility: above 30%
# def is_high(x):
# if math.isnan(x):
# return np.nan
# else:
# return (x > 0.3) + 0
vola['high'] = vola.apply(lambda x: (x > 0.3) + 0)
# what to do for the nan? for now leave it, we will just take it out of the modeling step
#vola.apply(lambda x: (x > 0.3)+0 if not math.isnan(x) else np.nan)
vola['high'].value_counts()
# 0 1618
# 1 209
# nice.
####################################
# Get the Dataset for Classification
####################################
# ok, now we want to join the topic dataframe with this dataframe on date
# give the column 'Day' to vola
vola.index = range(len(vola))
vola['Day'] = Day
vola.columns = ['volatility', 'high', 'Day']
# the threads
df = pd.read_csv('df_final.csv')
# again we remove the null values of df
missing = (df["text"].isnull()) | ((df["text"] == '[deleted]')) | ((df["text"] == '[removed]'))
df = df.loc[~missing]
feature_df = pd.read_csv('nlp-data/feature_df.csv', index_col = 0)
sentiment = pd.read_csv('nlp-data/sentiment.csv', index_col = 0)
tsne_df = pd.read_csv('nlp-data/df_topics_5.csv', index_col = 0)
# select some columns of the tsne_df
tsne_df = tsne_df[['id', 'topic_0', 'topic_1', 'topic_2',
'topic_3', 'topic_4']]
dominant_topic = tsne_df[['topic_' + str(c) for c in range(5)]].idxmax(axis=1)
# add sentiment to the df
df['sentiment'] = sentiment
# merge
full_df_1 = pd.merge(df, feature_df, on='id')
full_df = pd.merge(full_df_1, tsne_df, on = 'id')
# select the right columns to keep
keep = ['Day', 'author', 'comments', 'sentiment', 'dale_chall',
'type_token_ratio', 'characters', 'syllables', 'words', 'wordtypes',
'sentences', 'paragraphs', 'long_words', 'complex_words',
'complex_words_dc', 'tobeverb', 'auxverb', 'conjunction', 'pronoun',
'preposition', 'nominalization', 'topic_0', 'topic_1', 'topic_2',
'topic_3', 'topic_4']
full_df = full_df[keep]
merged_df = pd.merge(vola, full_df, on = 'Day')
# remove everything from 2020 onwards
merged_df = merged_df[merged_df['Day'] <'2020-01-01']
# remove the days where we have NaN volatility
merged_df = merged_df[~np.isnan(merged_df['volatility'])]
merged_df.shape # Out[125]: (143991, 28)
full_df = merged_df[['comments', 'sentiment',
'dale_chall', 'type_token_ratio', 'characters', 'syllables', 'words',
'wordtypes', 'sentences', 'paragraphs', 'long_words', 'complex_words',
'complex_words_dc', 'tobeverb', 'auxverb', 'conjunction', 'pronoun',
'preposition', 'nominalization', 'topic_0', 'topic_1', 'topic_2',
'topic_3', 'topic_4']]
##################
# Correlation Heatmap
##################
mask = np.tril(full_df.corr())
sns.heatmap(full_df.corr(), annot=False, mask=mask)
plt.show()
# unsurprisingly, the engineered features are HIGHLY correlated.
# solution? PCA
#####
# PCA
#####
X = full_df[['comments', 'sentiment', 'dale_chall', 'type_token_ratio',
'characters', 'syllables', 'words', 'wordtypes', 'sentences',
'paragraphs', 'long_words', 'complex_words', 'complex_words_dc',
'tobeverb', 'auxverb', 'conjunction', 'pronoun', 'preposition',
'nominalization']]
# NOTE: there are some NAN in X. We will do nearest neighbor interpolation
# documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.interpolate.html#pandas.DataFrame.interpolate
X = X.interpolate(method="nearest")
# Create a train / test set
#sentiment = sentiment.reset_index(drop=True)
#sentiment_binary = (sentiment > 0) + 0
#sentiment_binary = np.ravel(sentiment_binary)
high = merged_df['high']
# Train / Test Split
X_train, X_test, y_train, y_test = train_test_split(X,
high,
test_size=0.2,
random_state=420)
# Scale X_train and X_test, then fit PCA on the training set
scaler = StandardScaler()
Z_train = scaler.fit_transform(X_train)
Z_test = scaler.transform(X_test)  # reuse the scaler fitted on the training set (avoids test-set leakage)
# Fit PCA to the Train set
pca = PCA(n_components=Z_train.shape[1], svd_solver='full')
pca.fit(Z_train)
# Transform
X_train_pca = pca.transform(Z_train)
X_test_pca = pca.transform(Z_test)
# determine variance explained
print(pca.explained_variance_ratio_)
plt.plot(range(Z_train.shape[1]), pca.explained_variance_ratio_)
plt.show()
plt.plot(range(Z_train.shape[1]), np.cumsum(pca.explained_variance_ratio_))
plt.show()
np.cumsum(pca.explained_variance_ratio_)
# 6 components explains 90% of the variance.
# so, let's take the 6 components and do PCA regression
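# A minimal sketch (not part of the original script) of picking the component count
# programmatically instead of reading it off the plot: smallest k whose cumulative
# explained variance reaches 90%.
k_90 = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) >= 0.9)) + 1
print(k_90)  # expected to print 6, consistent with the comment above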
# get the components
pca = PCA(n_components=6, svd_solver='full')
pca.fit(Z_train)
# Transform
X_train_pca = pca.transform(Z_train)
X_test_pca = pca.transform(Z_test)
# regression: sentiment vs PC's
npc = np.array(range(6)) + 1
pcnames = ['PC_' + str(i) for i in npc ]
X_train_pca =
|
pd.DataFrame(X_train_pca, columns=pcnames)
|
pandas.DataFrame
|
""" `snps`
tools for reading, writing, merging, and remapping SNPs
"""
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from itertools import groupby, count
import os
import re
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from snps.ensembl import EnsemblRestClient
from snps.resources import Resources
from snps.io import Reader, Writer
from snps.utils import save_df_as_csv, Parallelizer, clean_str
# set version string with Versioneer
from snps._version import get_versions
import logging
logger = logging.getLogger(__name__)
__version__ = get_versions()["version"]
del get_versions
class SNPs:
def __init__(
self,
file="",
only_detect_source=False,
assign_par_snps=True,
output_dir="output",
resources_dir="resources",
deduplicate=True,
deduplicate_XY_chrom=True,
parallelize=False,
processes=os.cpu_count(),
rsids=(),
):
""" Object used to read and parse genotype / raw data files.
Parameters
----------
file : str or bytes
path to file to load or bytes to load
only_detect_source : bool
only detect the source of the data
assign_par_snps : bool
assign PAR SNPs to the X and Y chromosomes
output_dir : str
path to output directory
resources_dir : str
name / path of resources directory
deduplicate : bool
deduplicate RSIDs and make SNPs available as `duplicate_snps`
deduplicate_XY_chrom : bool
deduplicate alleles in the non-PAR regions of X and Y for males; see `discrepant_XY_snps`
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
rsids : tuple, optional
rsids to extract if loading a VCF file
"""
self._file = file
self._only_detect_source = only_detect_source
self._snps = pd.DataFrame()
self._duplicate_snps = pd.DataFrame()
self._discrepant_XY_snps = pd.DataFrame()
self._source = ""
self._phased = False
self._build = 0
self._build_detected = False
self._output_dir = output_dir
self._resources = Resources(resources_dir=resources_dir)
self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes)
if file:
d = self._read_raw_data(file, only_detect_source, rsids)
self._snps = d["snps"]
self._source = d["source"]
self._phased = d["phased"]
if not self._snps.empty:
self.sort_snps()
if deduplicate:
self._deduplicate_rsids()
self._build = self.detect_build()
if not self._build:
self._build = 37 # assume Build 37 / GRCh37 if not detected
else:
self._build_detected = True
if deduplicate_XY_chrom:
if self.determine_sex() == "Male":
self._deduplicate_XY_chrom()
if assign_par_snps:
self._assign_par_snps()
def __repr__(self):
return "SNPs({!r})".format(self._file[0:50])
@property
def source(self):
""" Summary of the SNP data source for ``SNPs``.
Returns
-------
str
"""
return self._source
@property
def snps(self):
""" Get a copy of SNPs.
Returns
-------
pandas.DataFrame
"""
return self._snps
@property
def duplicate_snps(self):
""" Get any duplicate SNPs.
A duplicate SNP has the same RSID as another SNP. The first occurrence
of the RSID is not considered a duplicate SNP.
Returns
-------
pandas.DataFrame
"""
return self._duplicate_snps
@property
def discrepant_XY_snps(self):
""" Get any discrepant XY SNPs.
A discrepant XY SNP is a heterozygous SNP in the non-PAR region of the X
or Y chromosome found during deduplication for a detected male genotype.
Returns
-------
pandas.DataFrame
"""
return self._discrepant_XY_snps
@property
def build(self):
""" Get the build of ``SNPs``.
Returns
-------
int
"""
return self._build
@property
def build_detected(self):
""" Get status indicating if build of ``SNPs`` was detected.
Returns
-------
bool
"""
return self._build_detected
@property
def assembly(self):
""" Get the assembly of ``SNPs``.
Returns
-------
str
"""
return self.get_assembly()
@property
def snp_count(self):
""" Count of SNPs.
Returns
-------
int
"""
return self.get_snp_count()
@property
def chromosomes(self):
""" Chromosomes of ``SNPs``.
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
return self.get_chromosomes()
@property
def chromosomes_summary(self):
""" Summary of the chromosomes of ``SNPs``.
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
return self.get_chromosomes_summary()
@property
def sex(self):
""" Sex derived from ``SNPs``.
Returns
-------
str
'Male' or 'Female' if detected, else empty str
"""
sex = self.determine_sex(chrom="X")
if not sex:
sex = self.determine_sex(chrom="Y")
return sex
@property
def unannotated_vcf(self):
""" Indicates if VCF file is unannotated.
Returns
-------
bool
"""
if self.snp_count == 0 and self.source == "vcf":
return True
return False
@property
def phased(self):
""" Indicates if genotype is phased.
Returns
-------
bool
"""
return self._phased
def heterozygous_snps(self, chrom=""):
""" Get heterozygous SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
pandas.DataFrame
"""
if chrom:
return self._snps.loc[
(self._snps.chrom == chrom)
& (self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] != self._snps.genotype.str[1])
]
else:
return self._snps.loc[
(self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] != self._snps.genotype.str[1])
]
def not_null_snps(self, chrom=""):
""" Get not null SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
pandas.DataFrame
"""
if chrom:
return self._snps.loc[
(self._snps.chrom == chrom) & (self._snps.genotype.notnull())
]
else:
return self._snps.loc[self._snps.genotype.notnull()]
def get_summary(self):
""" Get summary of ``SNPs``.
Returns
-------
dict
summary info if ``SNPs`` is valid, else {}
"""
if not self.is_valid():
return {}
else:
return {
"source": self.source,
"assembly": self.assembly,
"build": self.build,
"build_detected": self.build_detected,
"snp_count": self.snp_count,
"chromosomes": self.chromosomes_summary,
"sex": self.sex,
}
def is_valid(self):
""" Determine if ``SNPs`` is valid.
``SNPs`` is valid when the input file has been successfully parsed.
Returns
-------
bool
True if ``SNPs`` is valid
"""
if self._snps.empty:
return False
else:
return True
def save_snps(self, filename="", vcf=False, atomic=True, **kwargs):
""" Save SNPs to file.
Parameters
----------
filename : str or buffer
filename for file to save or buffer to write to
vcf : bool
flag to save file as VCF
atomic : bool
atomically write output to a file on local filesystem
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
return Writer.write_file(
snps=self, filename=filename, vcf=vcf, atomic=atomic, **kwargs
)
def _read_raw_data(self, file, only_detect_source, rsids):
return Reader.read_file(file, only_detect_source, self._resources, rsids)
def _assign_par_snps(self):
""" Assign PAR SNPs to the X or Y chromosome using SNP position.
References
-----
1. National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
2. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
4. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
5. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession:
rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
rest_client = EnsemblRestClient(
server="https://api.ncbi.nlm.nih.gov", reqs_per_sec=1
)
for rsid in self._snps.loc[self._snps["chrom"] == "PAR"].index.values:
if "rs" in rsid:
id = rsid.split("rs")[1]
response = rest_client.perform_rest_action("/variation/v0/refsnp/" + id)
if response is not None:
for item in response["primary_snapshot_data"][
"placements_with_allele"
]:
if "NC_000023" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "X")
elif "NC_000024" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "Y")
else:
assigned = False
if assigned:
if not self._build_detected:
self._build = self._extract_build(item)
self._build_detected = True
break
def _assign_snp(self, rsid, alleles, chrom):
# only assign SNP if positions match (i.e., same build)
for allele in alleles:
allele_pos = allele["allele"]["spdi"]["position"]
# ref SNP positions seem to be 0-based...
if allele_pos == self._snps.loc[rsid].pos - 1:
self._snps.loc[rsid, "chrom"] = chrom
return True
return False
def _extract_build(self, item):
assembly_name = item["placement_annot"]["seq_id_traits_by_assembly"][0][
"assembly_name"
]
assembly_name = assembly_name.split(".")[0]
return int(assembly_name[-2:])
def detect_build(self):
""" Detect build of SNPs.
Use the coordinates of common SNPs to identify the build / assembly of a genotype file
that is being loaded.
Notes
-----
rs3094315 : plus strand in 36, 37, and 38
rs11928389 : plus strand in 36, minus strand in 37 and 38
rs2500347 : plus strand in 36 and 37, minus strand in 38
rs964481 : plus strand in 36, 37, and 38
rs2341354 : plus strand in 36, 37, and 38
Returns
-------
int
detected build of SNPs, else 0
References
----------
1. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
2. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
3. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001
Jan 1;29(1):308-11.
4. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315,
rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
def lookup_build_with_snp_pos(pos, s):
try:
return s.loc[s == pos].index[0]
except:
return 0
build = 0
rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"]
df = pd.DataFrame(
{
36: [742429, 50908372, 143649677, 27566744, 908436],
37: [752566, 50927009, 144938320, 27656823, 918573],
38: [817186, 50889578, 148946169, 27638706, 983193],
},
index=rsids,
)
for rsid in rsids:
if rsid in self._snps.index:
build = lookup_build_with_snp_pos(
self._snps.loc[rsid].pos, df.loc[rsid]
)
if build:
break
return build
def get_assembly(self):
""" Get the assembly of a build.
Returns
-------
str
"""
if self._build == 37:
return "GRCh37"
elif self._build == 36:
return "NCBI36"
elif self._build == 38:
return "GRCh38"
else:
return ""
def get_snp_count(self, chrom=""):
""" Count of SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
int
"""
if chrom:
return len(self._snps.loc[(self._snps.chrom == chrom)])
else:
return len(self._snps)
def get_chromosomes(self):
""" Get the chromosomes of SNPs.
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
if not self._snps.empty:
return list(
|
pd.unique(self._snps["chrom"])
|
pandas.unique
|
#from dependencies import *
# Standard imports
import os
import random
import pandas as pd
import numpy as np
from tqdm import tqdm
from glob import glob
# Code imports
import model
import pretrainedmodels
import pretrainedmodels.utils as utils
# Image imports
from PIL import Image
from imgaug import augmenters as iaa
# PyTorch imports
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms.functional as transforms
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.parallel import data_parallel
from tensorboardX import SummaryWriter
from bunny import bunny
# Soft (differentiable) F2 loss: returns 1 minus a soft F_beta score (beta=2) computed
# from sigmoid probabilities.
def f2_loss(logits, labels):
__small_value=1e-6
beta = 2
batch_size = logits.size()[0]
p = F.sigmoid(logits)
l = labels
num_pos = torch.sum(p, 0) + __small_value
num_pos_hat = torch.sum(l, 0) + __small_value
tp = torch.sum(l * p, 0)
precise = tp / num_pos
recall = tp / num_pos_hat
fs = (1 + beta * beta) * precise * recall / (beta * beta * precise + recall + __small_value)
loss = fs.sum() / batch_size
return (1 - loss)
# Set all torch tensors as cuda tensors
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Printing options
pd.set_option('display.max_columns', None)
# Seeds for deterministic behavior
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
#print(pretrainedmodels.pretrained_settings)
# Neural Network training parameters
initial_learning_rate = 0.01
max_epochs = 500
cyclic_lr_epoch_period = 50
momentum = 0.9
weight_decay = 0.0005
model_name = 'resnet34'
mean = np.array(pretrainedmodels.pretrained_settings[model_name]['imagenet']['mean'])
std = np.array(pretrainedmodels.pretrained_settings[model_name]['imagenet']['std'])
input_size = np.array(pretrainedmodels.pretrained_settings[model_name]['imagenet']['input_size'])[1:]
# Miscellaneous Parameters
depths_filepath = 'data/depths.csv'
train_images_dir = 'data/train'
test_images_dir = 'data/test'
metadata_filepath = 'data/metadata.csv'
checkpoints_dir = 'checkpoints/' + model_name + '/'
log_dir = 'log/' + model_name + '/'
initial_checkpoint = ''
# Data parameters
number_of_folds = 5
train_params = {'batch_size': 32, 'shuffle': True, 'num_workers': 0, 'pin_memory': False}
validation_params = {'batch_size': 100, 'shuffle': False, 'num_workers': 0, 'pin_memory': False}
test_params = {'batch_size': 144, 'shuffle': False, 'num_workers': 0, 'pin_memory': False}
# Create directories
os.makedirs(name=checkpoints_dir, exist_ok=True)
os.makedirs(name=log_dir, exist_ok=True)
# Create summary writer for tensorboard visualization
summary = SummaryWriter(log_dir)
def test_augment(image):
x = []
tensors = []
x.append(transforms.resize(img=image, size=input_size, interpolation=Image.BILINEAR))
x.append(transforms.hflip(x[0]))
x.append(transforms.adjust_brightness(x[0], brightness_factor=0.4*np.random.random() + 0.8))
x.append(transforms.adjust_hue(x[0], hue_factor=0.2*np.random.random() - 0.1))
for i, img in enumerate(x):
tensors.append(transforms.to_tensor(img))
tensors[i] = transforms.normalize(tensors[i], mean, std).cuda()
return tensors
def validation_augment(image):
x = []
tensors = []
x.append(transforms.resize(img=image, size=input_size, interpolation=Image.BILINEAR))
x.append(transforms.hflip(x[0]))
x.append(transforms.adjust_brightness(x[0], brightness_factor=0.4*np.random.random() + 0.8))
x.append(transforms.adjust_hue(x[0], hue_factor=0.2*np.random.random() - 0.1))
for i, img in enumerate(x):
tensors.append(transforms.to_tensor(img))
tensors[i] = transforms.normalize(tensors[i], mean, std).cuda()
return tensors
def train_augment(image):
x = image
x = transforms.resize(img=x, size=input_size, interpolation=Image.BILINEAR)
# Random horizontal flip
if np.random.random() >= 0.5:
x = transforms.hflip(x)
# Brightness, hue or no adjustment
c = np.random.choice(3)
if c == 0:
pass
elif c == 1:
x = transforms.adjust_brightness(x, brightness_factor=0.4*np.random.random() + 0.8)
elif c == 2:
x = transforms.adjust_hue(x, hue_factor=0.2*np.random.random() - 0.1)
x = transforms.to_tensor(x)
x = transforms.normalize(x, mean, std)
return x.cuda()
class TGS_Dataset(Dataset):
def __init__(self, data, augment=train_augment, mode='train'):
self.data = data
self.augment = augment
self.mode = mode
self.x = []
self.y = []
for i in range(len(self.data)):
img = Image.open(self.data['file_path_image'][i])
self.x.append(img)
fp = img.fp
img.load()
if self.mode != 'test':
img = Image.open(self.data['file_path_mask'][i])
label = np.array(img)
fp = img.fp
img.load()
if np.sum(label) > 0:
self.y.append(1.0)
else:
self.y.append(0.0)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if self.mode == 'test':
return self.augment(self.x[index]), self.data['id'][index]
else:
return self.augment(self.x[index]), torch.tensor(self.y[index]).cuda()
def prepare_data():
depths = pd.read_csv(depths_filepath)
# Load train metadata
metadata = {}
for filename in tqdm(os.listdir(os.path.join(train_images_dir, 'images'))):
image_filepath = os.path.join(train_images_dir, 'images', filename)
mask_filepath = os.path.join(train_images_dir, 'masks', filename)
image_id = filename.split('.')[0]
depth = depths[depths['id'] == image_id]['z'].values[0]
# calculate salt coverage
mask = np.array(Image.open(mask_filepath))
salt_coverage = np.sum(mask > 0) / (mask.shape[0]*mask.shape[1])
metadata.setdefault('file_path_image', []).append(image_filepath)
metadata.setdefault('file_path_mask', []).append(mask_filepath)
metadata.setdefault('is_train', []).append(1)
metadata.setdefault('id', []).append(image_id)
metadata.setdefault('z', []).append(depth)
metadata.setdefault('salt_coverage', []).append(salt_coverage)
# Sort by coverage and split in n folds
data = pd.DataFrame.from_dict(metadata)
data.sort_values('salt_coverage', inplace=True)
data['fold'] = (list(range(number_of_folds))*data.shape[0])[:data.shape[0]]
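# cycling 0..number_of_folds-1 down the coverage-sorted rows assigns neighbouring
# coverage values to different folds, i.e. a simple stratification on salt coverage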
# Load test metadata
metadata = {}
for filename in tqdm(os.listdir(os.path.join(test_images_dir, 'images'))):
image_filepath = os.path.join(test_images_dir, 'images', filename)
image_id = filename.split('.')[0]
depth = depths[depths['id'] == image_id]['z'].values[0]
metadata.setdefault('file_path_image', []).append(image_filepath)
metadata.setdefault('file_path_mask', []).append(None)
metadata.setdefault('fold', []).append(None)
metadata.setdefault('id', []).append(image_id)
metadata.setdefault('is_train', []).append(0)
metadata.setdefault('salt_coverage', []).append(None)
metadata.setdefault('z', []).append(depth)
data = data.append(
|
pd.DataFrame.from_dict(metadata)
|
pandas.DataFrame.from_dict
|
from snsql import *
import pandas as pd
import numpy as np
privacy = Privacy(epsilon=3.0, delta=0.1)
class TestPreAggregatedSuccess:
# Test input checks for pre_aggregated
def test_list_success(self, test_databases):
# pass in properly formatted list
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success_df(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated =
|
pd.DataFrame(data=pre_aggregated[1:], index=None)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'),
|
u('second')
|
pandas.compat.u
|
import io
import sys
import unittest
from pyalink.alink import *
from pyalink.alink.common.utils.printing import print_with_title
class TestLazyModelTrainInfo(unittest.TestCase):
def setUp(self) -> None:
self.saved_stdout = sys.stdout
self.str_out = io.StringIO()
sys.stdout = self.str_out
def tearDown(self) -> None:
sys.stdout = self.saved_stdout
print(self.str_out.getvalue())
def test_lazy_model_info(self):
import numpy as np
import pandas as pd
data = np.array([
[0, "0 0 0"],
[1, "0.1,0.1,0.1"],
[2, "0.2,0.2,0.2"],
[3, "9 9 9"],
[4, "9.1 9.1 9.1"],
[5, "9.2 9.2 9.2"]
])
df =
|
pd.DataFrame({"id": data[:, 0], "vec": data[:, 1]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 17:00:53 2017
@author: Dan
"""
# Data cleaning
# Web crawler
import pandas as pd
stockcodeurl = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=2'
rawdata =
|
pd.read_html(stockcodeurl)
|
pandas.read_html
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import shutil
from tests.test_streams.base import get_streams_client, set_store
from tests.utils import BaseTestCase
from polyaxon import settings
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import V1Event, V1Events
from polyaxon.polyboard.events.schemas import LoggedEventListSpec
from polyaxon.streams.app.main import STREAMS_URL
from polyaxon.utils.path_utils import create_path
class TestEventsEndpoints(BaseTestCase):
def setUp(self):
super().setUp()
self.store_root = set_store()
self.run_path = os.path.join(self.store_root, "uuid")
self.run_events = os.path.join(self.run_path, "events")
# Create run artifacts path
create_path(self.run_path)
# Create run artifacts events path
create_path(self.run_events)
# Create run events
self.create_tmp_events()
self.client = get_streams_client()
self.base_url = STREAMS_URL + "/namespace/owner/project/runs/uuid/events"
def create_tmp_events(self):
text1 = LoggedEventListSpec(
name="text1",
kind=V1ArtifactKind.TEXT,
events=[
V1Event.make(step=1, text="foo1"),
V1Event.make(step=2, text="boo2"),
],
)
self.create_kind_events(name="text1", kind=V1ArtifactKind.TEXT, events=text1)
text2 = LoggedEventListSpec(
name="text2",
kind=V1ArtifactKind.TEXT,
events=[
V1Event.make(step=1, text="foo2"),
V1Event.make(step=2, text="boo2"),
],
)
self.create_kind_events(name="text2", kind=V1ArtifactKind.TEXT, events=text2)
html1 = LoggedEventListSpec(
name="html1",
kind=V1ArtifactKind.HTML,
events=[
V1Event.make(step=1, html="foo1"),
V1Event.make(step=2, html="boo2"),
],
)
self.create_kind_events(name="html1", kind=V1ArtifactKind.HTML, events=html1)
html2 = LoggedEventListSpec(
name="htm2",
kind=V1ArtifactKind.HTML,
events=[
V1Event.make(step=1, html="foo2"),
V1Event.make(step=2, html="boo2"),
],
)
self.create_kind_events(name="html2", kind=V1ArtifactKind.HTML, events=html2)
def create_kind_events(self, name, kind, events):
event_kind_path = "{}/{}".format(self.run_events, kind)
create_path(event_kind_path)
with open("{}/{}.plx".format(event_kind_path, name), "+w") as f:
f.write(events.get_csv_header())
f.write(events.get_csv_events())
def test_download_events_cached(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/text?names=text1,text2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
shutil.rmtree(self.run_events)
response = self.client.get(self.base_url + "/text?names=text1,text2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
shutil.rmtree(
os.path.join(settings.CLIENT_CONFIG.archive_root, "uuid", "events")
)
response = self.client.get(self.base_url + "/text?names=text1,text2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
def test_download_events_cached_force(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/text?names=text1,text2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
shutil.rmtree(self.run_events)
response = self.client.get(
self.base_url + "/text?names=text1,text2&orient=csv&force=true"
)
assert response.status_code == 200
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
def test_download_text_events_as_csv(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/text?names=text1&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is False
events1 = V1Events.read(name="text1", kind=V1ArtifactKind.TEXT, data=filepath1)
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
assert results[0].name == events1.name
assert results[0].kind == events1.kind
assert pd.DataFrame.equals(results[0].df, events1.df)
response = self.client.get(self.base_url + "/text?names=text1,text2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
events2 = V1Events.read(name="text2", kind=V1ArtifactKind.TEXT, data=filepath2)
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
response = self.client.get(
self.base_url + "/text?names=text1,text2,text3&orient=csv"
)
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
def test_download_html_events_as_csv(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "html", "html1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "html", "html2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/html?names=html1&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is False
events1 = V1Events.read(name="html1", kind=V1ArtifactKind.HTML, data=filepath1)
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
assert results[0].name == events1.name
assert results[0].kind == events1.kind
assert pd.DataFrame.equals(results[0].df, events1.df)
response = self.client.get(self.base_url + "/html?names=html1,html2&orient=csv")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
events2 = V1Events.read(name="html2", kind=V1ArtifactKind.HTML, data=filepath2)
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
response = self.client.get(
self.base_url + "/html?names=text1,html1,html2&orient=csv"
)
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
for res in response.json()["data"]:
assert isinstance(res["data"], str)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
def test_download_text_events_as_dict(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "text", "text2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/text?names=text1&orient=dict")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is False
events1 = V1Events.read(
name="text1", kind=V1ArtifactKind.TEXT, data=filepath1, parse_dates=False
)
for res in response.json()["data"]:
assert isinstance(res["data"], dict)
results = [V1Events.read(**i) for i in response.json()["data"]]
assert results[0].name == events1.name
assert results[0].kind == events1.kind
assert pd.DataFrame.equals(results[0].df, events1.df)
response = self.client.get(
self.base_url + "/text?names=text1,text2&orient=dict"
)
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
events2 = V1Events.read(
name="text2", kind=V1ArtifactKind.TEXT, data=filepath2, parse_dates=False
)
for res in response.json()["data"]:
assert isinstance(res["data"], dict)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
response = self.client.get(
self.base_url + "/text?names=text1,text2,text3&orient=dict"
)
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is True
for res in response.json()["data"]:
assert isinstance(res["data"], dict)
results = [V1Events.read(**i) for i in response.json()["data"]]
expected = {events1.name: events1, events2.name: events2}
for res in results:
exp = expected[res.name]
assert res.name == exp.name
assert res.kind == exp.kind
assert pd.DataFrame.equals(res.df, exp.df)
def test_download_html_events_as_dict(self):
filepath1 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "html", "html1.plx"
)
filepath2 = os.path.join(
settings.CLIENT_CONFIG.archive_root, "uuid", "events", "html", "html2.plx"
)
assert os.path.exists(filepath1) is False
assert os.path.exists(filepath2) is False
response = self.client.get(self.base_url + "/html?names=html1&orient=dict")
assert response.status_code == 200
assert os.path.exists(filepath1) is True
assert os.path.exists(filepath2) is False
events1 = V1Events.read(
name="html1", kind=V1ArtifactKind.HTML, data=filepath1, parse_dates=False
)
for res in response.json()["data"]:
assert isinstance(res["data"], dict)
results = [V1Events.read(**i) for i in response.json()["data"]]
assert results[0].name == events1.name
assert results[0].kind == events1.kind
assert
|
pd.DataFrame.equals(results[0].df, events1.df)
|
pandas.DataFrame.equals
|
# Import necessary libraries
import json
import joblib
import pandas as pd
import streamlit as st
# Machine Learning
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
# Custom classes
from ..utils import isNumerical
import os
def app():
"""This application helps in running machine learning models without having to write explicit code
by the user. It runs some basic models and let's the user select the X and y variables.
"""
# Load the data
if 'main_data.csv' not in os.listdir('data'):
st.markdown("Please upload data through `Upload Data` page!")
else:
data = pd.read_csv('data/main_data.csv')
# Create the model parameters dictionary
params = {}
# Use two column technique
# col1, col2 = st.beta_columns(2)
col1, col2 = st.columns(2)
# Design column 1
y_var = col1.radio("Select the variable to be predicted (y)", options=data.columns)
# Design column 2
X_var = col2.multiselect("Select the variables to be used for prediction (X)", options=data.columns)
# Check if len of x is not zero
if len(X_var) == 0:
st.error("You have to put in some X variable and it cannot be left empty.")
# Check if y not in X
if y_var in X_var:
st.error("Warning! Y variable cannot be present in your X-variable.")
# Option to select predition type
pred_type = st.radio("Select the type of process you want to run.",
options=["Regression", "Classification"],
help="Write about reg and classification")
# Add to model parameters
params = {
'X': X_var,
'y': y_var,
'pred_type': pred_type,
}
# if st.button("Run Models"):
st.write(f"**Variable to be predicted:** {y_var}")
st.write(f"**Variable to be used for prediction:** {X_var}")
# Divide the data into test and train set
X = data[X_var]
y = data[y_var]
# Perform data imputation
# st.write("THIS IS WHERE DATA IMPUTATION WILL HAPPEN")
# Perform encoding
X =
|
pd.get_dummies(X)
|
pandas.get_dummies
|
#!/usr/bin/env python
# coding: utf-8
# # Experiments @Fischer in Montebelluna 28.02.20
# We had the opportunity to use Fischer's Flexometer for ski boots, with their help, at Montebelluna. The idea is to validate our system by simultaneously acquiring data with our sensor setup and with their machine. With Fischer's machine it is possible to apply exact loads.
# We used both of our sensorized ski boots (Dynafit Hoji Pro Tour W and Dynafit TLT Speedfit). We had already used the Hoji in the past for our experiments in the lab @Bz with our self-built test bench. For the TLT Speedfit this was the first experiment.
#
# Strain gauge setup:
# - Dynafit Hoji Pro Tour: 4 pairs of strain gauges 1-4 (a=0°, b=90°)
# - Dynafit TLT Speedfit: 4 triples of strain gauges 1-4 (a=0°,b=45°,c=90°)
# As we had only limited time, we tested all 4 strain gauge pairs of the Hoji and only strain gauge triple 3 of the TLT Speedfit. For the first time the new datalogger prototype was running in an experiment, and also for the first time in battery mode and not at room temperature. Unfortunately the connection of the strain gauges to the logging system was not ideal, as in battery mode we have no way to check the channel connections yet. We'll have a solution for this in the next days.
#
# Experiments (ambient temperature: 4°C):
# - #1: Hoji Pro Tour, 4a&b
# - #2: Hoji Pro Tour, 3a&b
# - #3: Hoji Pro Tour, 2a&b
# - #4: Hoji Pro Tour, 1a&b
# - #5: TLT Speedfit, 3a&b&c
#
# ATTENTION: The Hoji boot was not closed as tightly as the TLT. Take this into consideration when looking at the force/angular-displacement graph.
# In[50]:
# Importing libraries
import pandas as pd
import numpy as np
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import csv
import matplotlib.patches as mpatches #needed for plot legend
from matplotlib.pyplot import *
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
# # Machine Data: load and plot
# The boot was loaded cyclically by the machine with a maximum of F = 150 N. In each single experiment 1-5 we exported the data of the last 5 cycles.
#
# In[51]:
#Loading data in df[expnr]: exprnr-> experiment 1-5 with cycle 1-5
expnr=5 #number of exp
cyclenr = 5 #number of cycle per experiment
colnr = 2*cyclenr #
dfm={}
for expnr in range(expnr):
d = {}
for i in range(cyclenr): #load data from cycle 1-5
d[expnr,i] = pd.DataFrame()
d[expnr,i] = pd.read_csv('ESP'+ str(expnr+1) + 'ciclo'+ str(i+1) +'.csv', sep='\t',header=None)
dfm[expnr]=pd.concat([d[expnr,0], d[expnr,1], d[expnr,2], d[expnr,3], d[expnr,4]], axis=1, join='inner')
dfm[expnr] = np.array(dfm[expnr]) #transform in np.array
for i in range(len(dfm[expnr])): #replace , with . and change format to float
for j in range(colnr):
dfm[expnr][i,j]=float(dfm[expnr][i,j].replace(',', '.'))
#print(dfm[1][:,0])
# In[52]:
figm, axm = plt.subplots(5, 5, figsize=(13, 11), sharex='col') #define plot settings
col_title = ['Experiment {}'.format(col) for col in range(1, 5)]
for i in range(expnr+1):
for j in range(cyclenr):
axm[j,i].plot(dfm[i][:,2*j+1],dfm[i][:,2*j])
axm[0,i].set_title('Experiment '+ str(i+1))
axm[j,0].set(ylabel='F[N] Cycle'+ str(j+1))
axm[4,i].set(xlabel='angle [°]')
plt.tight_layout()
figm.suptitle('Machine Data Plot (Hoji Pro Tour: 1-4, TLT Speedfit: 5)',fontsize=16)
figm.subplots_adjust(top=0.88)
# On the y-axis the force F is shown (max 150 N) and on the x-axis the displacement angle alpha.
# In the plot above the columns show the experiments and the rows the single cycles. The cycles within the same experiment are qualitatively quite similar. It's cool how clear the difference between the two ski boot models is: experiments 1-4 show the Dynafit Hoji Pro Tour and experiment 5 the Dynafit TLT Speedfit.
# # Calculate surface under curve
# To compare the energy release between the Hoji and the TLT we are going to calculate the area enclosed by the force-angle curve.
# We can calculate the area under a curve (curve to x-axis) by integration, $E = \int M \, d\varphi$. Via interpolation of points extracted from the curve we generate a function which is then integrated with the trapezoidal rule to get the area. Subtracting the unloading area from the loading area gives the enclosed area, which corresponds to the energy release.
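# For reference, a short statement of the rule used below (our notation): for sampled points $(\varphi_k, M_k)$ the trapezoidal rule gives $E \approx \sum_k \tfrac{1}{2}(M_k + M_{k+1})(\varphi_{k+1} - \varphi_k)$, which is what `np.trapz(y, x)` computes; the released energy is then $|E_{loading} - E_{unloading}|$.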
# In[53]:
from scipy.interpolate import interp1d
from numpy import trapz
# Experiment data
x1=dfm[1][:,1] # Exp2 cycle 1 Hoji (angle)
y1=dfm[1][:,0] # Exp2 cycle 1 Hoji (force)
x2=dfm[4][:,1] # Exp5 cycle 1 TLT (angle)
y2=dfm[4][:,0] # Exp5 cycle 1 TLT (force)
ym1=np.array([-29,17,41.14,63,96,147.8]) # force (y) points, loading Hoji
xm1=np.array([-1.5,2.9,7.312,11,13.7,13.94]) # angle (x) points, loading Hoji
ym2=np.array([-29,3.741,25,43.08,63,72,106,147.8]) # force (y) points, unloading Hoji
xm2=np.array([-1.5,-0.646,1.2,3.127,6.6,8.37,13.28,13.94]) # angle (x) points, unloading Hoji
ym3=np.array([-28.5,-12.27,4.841,18.01,31.92,39.46,87.48,145.6]) # force (y) points, loading TLT
xm3=np.array([-2.752,-0.989,1.022,3.23,5.387,6.012,6.521,6.915]) # angle (x) points, loading TLT
ym4=np.array([-28.5,2.042,26.35,41.36,51.86,56.33,93.87,145.6]) # force (y) points, unloading TLT
xm4=np.array([-2.752,-1.94,-0.43,1.524,3.76,5.625,6.24,6.915]) # angle (x) points, unloading TLT
# Interpolation
f1 = interp1d(xm1, ym1)
f2 = interp1d(xm2, ym2)
f3 = interp1d(xm3, ym3)
f4 = interp1d(xm4, ym4)
# Plot of original data and interpolation
fig0, ax0 = plt.subplots(1, 2, figsize=(15, 8))
fig0.suptitle('Ski boot testing machine', fontsize=16)
#fig0.suptitle('Interpolation of experiment data 1&5 cycle 1 (left: Hoji, right: TLT)', fontsize=16)
ax0[0].plot(x1,y1) # loading Hoji
ax0[0].set_title('Hoji Pro Tour W')
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
#ax0[0].plot(x1,y1,xm1,ym1, 'o', xm1, f1(xm1), '-') # loading Hoji
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
ax0[0].set(xlabel='angle [°]')
ax0[0].set(ylabel='Force [N]')
ax0[1].plot(x2,y2) # Exp5 cycle 1 TLT
ax0[1].set_title('TLT Speedfit')
#ax0[1].plot(x2,y2,xm3,ym3, 'o', xm3, f3(xm3), '-') # loading Hoji
#ax0[1].plot(xm4,ym4, 'o', xm4, f4(xm4), '-', xm4, f4(xm4), '--') # unloading Hoji
ax0[1].set(xlabel='angle [°]')
ax0[1].set(ylabel='Force [N]')
plt.show()
# Calculation of area between loading and unloading curve -> Energy
area1_hoji=np.trapz(f1(xm1), xm1)
area2_hoji=np.trapz(f2(xm2), xm2)
area1_TLT=np.trapz(f3(xm3), xm3)
area2_TLT=np.trapz(f4(xm4), xm4)
energy_hoji=abs(area1_hoji-area2_hoji)
energy_TLT=abs(area1_TLT-area2_TLT)
#print('Energy release Hoji = ', energy_hoji, '[J]')
#print('Energy release TLT = ', energy_TLT, '[J]')
# # Bootsensing: load and plot
# We created a datalogger which saves the experiment data in a .txt file on an SD card. After the experiments we copied the files from the SD card to our PC.
# <NAME> did excellent work with his file reader (https://github.com/raphaFanti/multiSensor/blob/master/analysis/03.%20Experiments_200220/Analysis%20v02/datanalysis_200220-v02.ipynb), which I'm using here to load this data. I modified the col_names, as we used adapted column names last time, and updated the experiment date. He also implemented a good way to store everything in one big dataframe, so I'll copy this code from Raphael as well.
# In[54]:
# transforms a time string into a datetime element
def toDate(timeString):
hh, mm, ss = timeString.split(":")
return datetime.datetime(2020, 2, 28, int(hh), int(mm), int(ss)) # date of experiment: 28.02.20
# returns a dataframe for each sub experient
col_names = ["ID","strain1","strain2","strain3","temp","millis"] # column names from file
cols_ordered = ["time","strain1","strain2","strain3"] # order wished
cols_int = ["strain1","strain2","strain3"] # to be transformed to int columns
def getDf(fl, startTime):
# ! note that we remove the first data line for each measurement since the timestamp remains zero for two first lines
fl.readline() # line removed
line = fl.readline()
lines = []
while "Time" not in line:
cleanLine = line.rstrip()
# trick for int since parsing entire column was not working
intsLine = cleanLine.replace(".00", "")
splitedLine = intsLine.split(",")
lines.append(splitedLine)
line = fl.readline()
# create dataframe
df = pd.DataFrame(lines, columns = col_names)
# create time colum
df["time"] = df["millis"].apply(lambda x: startTime + datetime.timedelta(milliseconds = int(x)))
# drop ID, millis and temperature, and order columns
df = df.drop(["ID", "temp", "millis"], axis = 1)
df = df[cols_ordered]
# adjust types
df[cols_int] = df[cols_int].astype(int)
return df
# Load data into a dataframe. As we were not working with our usual experiment protocol, I had to skip phase = bl2.
# In[55]:
filenames = ["2022823_exp1","2022848_exp2","2022857_exp3", "202285_exp4", "2022829_exp5"]
nExp = len(filenames) # we simply calculate the number of experiments
# big data frame
df = pd.DataFrame()
for i, this_file in enumerate(filenames):
# experiment counter
exp = i + 1
# open file
with open(this_file + ".TXT", 'r') as fl:
# throw away first 3 lines and get baseline 1 start time
for i in range(3):
fl.readline()
# get start time for first baseline
bl1_time = fl.readline().replace("BASELINE Time: ", "")
startTime = toDate(bl1_time)
# get data for first baseline
df_bl1 = getDf(fl, startTime)
df_bl1["phase"] = "bl1"
# get start time for experiment
exp_time = fl.readline().replace("RECORDING Time: ", "")
startTime = toDate(exp_time)
# get data for experiment
df_exp = getDf(fl, startTime)
df_exp["phase"] = "exp"
# get start time for second baseline
#bl2_time = fl.readline().replace("BASELINE Time: ", "")
#startTime = toDate(bl2_time)
# get data for second baseline
#df_bl2 = getDf(fl, startTime)
#df_bl2["phase"] = "bl2"
# create full panda
df_exp_full = pd.concat([df_bl1, df_exp])
# create experiment column
df_exp_full["exp"] = exp
df = pd.concat([df, df_exp_full])
# shift columns exp and phase to begining
cols = list(df.columns)
cols = [cols[0]] + [cols[-1]] + [cols[-2]] + cols[1:-2]
df = df[cols]
#print(df)
# In[56]:
def plotExpLines(df, exp):
fig, ax = plt.subplots(3, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment ' + str(exp), fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(dfExp["time"], dfExp["strain3"], 'tab:green')
ax[0].set(ylabel='strain3')
ax[1].plot(dfExp["time"], dfExp["strain1"], 'tab:red')
ax[1].set(ylabel='strain1')
ax[2].plot(dfExp["time"], dfExp["strain2"], 'tab:blue')
ax[2].set(ylabel='strain2')
ax[2].set(xlabel='time [ms]')
plt.show()
# ### Experiment 1
# In[57]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 1]['time'],df[df["exp"] == 1]['strain3'])
plt.xlabel('daytime')
plt.ylabel('4A')
plt.title('Experiment 1: 4A ')
plt.show()
# We applied 34 cycles.
# ### Experiment 2
# In[58]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 2]['time'],df[df["exp"] == 2]['strain3'])
plt.xlabel('daytime')
plt.ylabel('3A')
plt.title('Experiment 2: 3A ')
plt.show()
# ### Experiment 3
# In[59]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 3]['time'],df[df["exp"] == 3]['strain3'])
plt.xlabel('daytime')
plt.ylabel('2B')
plt.title('Experiment 3: 2B ')
plt.show()
# ### Experiment 4
# In[60]:
figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 4]['time'],df[df["exp"] == 4]['strain3'])
plt.xlabel('daytime')
plt.ylabel('1A')
plt.title('Experiment 4: 1A ')
plt.show()
# ### Experiment 5
# In[61]:
fig, ax = plt.subplots(2, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment 5: 3B & 3C ', fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain3'], 'tab:green')
ax[0].set(ylabel='3C')
ax[1].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain2'], 'tab:red')
ax[1].set(ylabel='3B')
ax[1].set(xlabel='daytime')
plt.show()
# In[62]:
#dfExp = df[df["exp"] == 3]
#plotExpLines(dfExp, 3)
# # Analysis
# Now we try to compare the data from the Flexometer of Fischer and from our Bootsensing.
# - Fischer: force F over displacement angle alpha
# - Bootsensing: deformation measured by strain gauges (resistance change), in a not-yet-calibrated unit, over time (daytime shown in the plots)
# The idea now is to identify the last 5 cycles in the Bootsensing data automatically and to extract the time information (t0, t). Afterwards this delta t can be applied to Fischer's data to plot the force F over the extracted time.
# ### Bootsensing: Cycle identification
# For experiments 1-5 we will identify the last 5 cycles of strain3. As Fischer's data starts at a peak (maximum load), we identify the peaks in our bootsensing data as well and extract the last 6 peak indices. Applying these indices to the strain3/time data we get the last 5 cycles.
#
# Find peaks: find_peaks function https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
# Find valley: with Inverse of find peaks
#
#
# In[63]:
from scipy.signal import find_peaks
import numpy as np
# Load data of Experiments 1-5
ds={} # dict for strain data -> dataformat will be changed
dt={} # time data
peaks={} # peaks
valleys={} # valleys
inv_ds={} # inverse for valleys calculation
ds_peaks={} # index of peak (used for 5-2)
ds_peaks_end={} # index of last peaks
ds_valleys_end = {} # index of last valley
ds_valleys={} # index of valley (used for 5-2)
len_valley={} # valley lenght
for i in range(1,6): # i = Experiment number
ds[i]=df[df["exp"] == i]['strain3'] #data for strain3
dt[i]=df[df["exp"] == i]['time'] # time data
ds[i]=ds[i].dropna() # drop NaN
dt[i]=dt[i].dropna()
ds[i]=ds[i].reset_index(drop=True) #reset index
dt[i]=dt[i].reset_index(drop=True)
peaks[i],_=find_peaks(ds[i],prominence=100000) # find peaks
inv_ds[i]=ds[i]*(-1) # inverse of ds
valleys[i],_=find_peaks(inv_ds[i],prominence=10000) # find valleys
for j in range(1,6): # j = cycle number
ds_valleys[j,i]=valleys[i][-1-j:-j] # selecting last 5 valleys
ds_valleys_end[j,i]=valleys[i][-1:] # select last valley
ds_valleys[j,i]=ds_valleys[j,i][0] # assign index
ds_valleys_end[j,i]=ds_valleys_end[j,i][0]
ds_peaks[j,i]=peaks[i][-1-j:-j] # selecting last 5 peaks
ds_peaks_end[j,i]=peaks[i][-1:] # select last peak
ds_peaks[j,i]=ds_peaks[j,i][0] # assign index
ds_peaks_end[j,i]=ds_peaks_end[j,i][0]
#print(ds1[1][ds_valleys[1,1]])
#Calculate cycle lengths
#for i in range(1,6):
#len_valley[e] = dt1[e][ds_valleys[1,1]] - dt1[e][ds_valleys[2,1]] #1th
#len_valley1_2[i] = dt1[ds_valley_3[i]] - dt1[ds_valley_4[i]] #2th
#len_valley2_3[i] = dt1[ds_valley_2[i]] - dt1[ds_valley_3[i]] #3th
#len_valley3_4[i] = dt1[ds_valley_1[i]] - dt1[ds_valley_2[i]] #4th
#len_valley4_5[i] = dt1[ds_valley_last_end[i]] - dt1[ds_valley_1[i]] #5th
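# Quick check (minimal sketch, using the dt/valleys dictionaries defined above): the time between
# neighbouring valleys should be roughly 3 s if the machine really loaded with f = 0.33 Hz.
# Note that for experiment 1 the rising signal after the cycles may add spurious valleys.
for e in range(1,6):
    valley_times = dt[e][valleys[e][-6:]]         # timestamps of the last 6 valleys -> 5 cycles
    cycle_lengths = valley_times.diff().dropna()  # time between neighbouring valleys
    print('Exp', e, 'cycle lengths:', list(cycle_lengths))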
# EXPERIMENT 1: pay attention to the peaks/valleys after the cycles
# Now we will plot the data for strain3 for each experiment with their peaks and valleys.
# In[64]:
# Plot peaks and valleys for Exp 1-5 for strain3
fig1, ax1 = plt.subplots(5, 1, figsize=(15, 8))
fig1.subplots_adjust(top=2)
fig1.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
for i in range(5): # i for Experiment number
ax1[i].plot(df[df["exp"] == (i+1)]['time'], df[df["exp"] == (i+1)]['strain3'], 'tab:green')
ax1[i].plot(dt[(i+1)][peaks[(i+1)]],ds[(i+1)][peaks[(i+1)]],"x") #Plot peaks with x
ax1[i].plot(dt[(i+1)][valleys[(i+1)]],ds[(i+1)][valleys[(i+1)]],"o") #Plot valleys with o
ax1[i].set(ylabel='raw signal')
ax1[i].set(xlabel='daytime')
ax1[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig1.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# Plot last 5 cycles for Exp 1-5 for strain3
fig2, ax2 = plt.subplots(5, 1, figsize=(10, 8))
fig2.suptitle('Experiments 1-5: last 5 cycles ', fontsize=16)
for i in range(5): # i for Experiment number
ax2[i].plot(dt[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]],ds[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]]) # select data between 5th last and last valley
#ax2[i].plot(dt[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]],ds[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]])# select data between 5th last and last peak
ax2[i].set(ylabel='raw signal')
ax2[i].set(xlabel='daytime')
ax2[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig2.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
#plt.axvline(x=dt[ds_valley_2_index],color="grey") #time borders 3th cycle
#plt.axvline(x=dt[ds_valley_3_index],color="grey")
#plt.axhline(y=ds[ds_valley_3_index],color="red") # h line
# For experiments 2-5 the last 5 cycles are clear. The signal of experiment 1 rises again after the cyclic loading, so it's not possible to select the last 5 cycles with this "peaks" method, but we can still extract the last cycle.
# As we can see in the plot of the last 5 cycles above, the last cycle of Exp1, Exp3 and Exp5 ends with a peak, whereas Exp2 and Exp4 end with a valley. We can tell this from the plots because we know from the exported machine data that a cycle always ends with the maximum force of 150 N, which corresponds to either a peak or a valley in our bootsensing signal.
# ### Match Fischer Data with Bootsensing cycle time
# Now we are going to match the bootsensing cycle time with Fischer's force data for each experiment 1-5. As Fischer's machine applied the load with a frequency of 0.33 Hz, the length of each cycle should be approximately t = 3 s. We verified this by calculating the time between two neighbouring valleys of our bootsensing data (see the code above).
# In[65]:
#Identify frequency of Fischer Dataacquisition
f={} # Fischer force matrix
freq={} # matrix with vector lenght to identify frequency
for i in range(5): #
f[i] = dfm[i][:,2*i] # load force data for Exp5, strain3 0,2,4,6,8
freq[i] = len(dfm[i][:,2*i]) # force vector len
#Create time linspace for Fischer data
#Timestamps cannot be selected by item programmatically here, so the cycle start/end times are assigned manually below
time_start1=dt[1][ds_peaks[5,1]] # Exp1: select manually last cycle
time_end1=dt[1][ds_peaks[4,1]]
time_start2=dt[2][ds_valleys[5,2]] # Exp2
time_end2=dt[2][ds_valleys[4,2]]
time_start3=dt[3][ds_peaks[5,3]] # Exp3
time_end3=dt[3][ds_peaks[4,3]]
time_start4=dt[4][ds_valleys[5,4]] # Exp4
time_end4=dt[4][ds_valleys[4,4]]
time_start5=dt[5][ds_peaks[5,5]] # Exp5
time_end5=dt[5][ds_peaks[4,5]]
#print(time_start1,time_end1)
x1=pd.date_range(time_start1, time_end1, periods=freq[0]).to_pydatetime()
x2=pd.date_range(time_start2, time_end2, periods=freq[1]).to_pydatetime()
x3=pd.date_range(time_start3, time_end3, periods=freq[2]).to_pydatetime()
x4=pd.date_range(time_start4, time_end4, periods=freq[3]).to_pydatetime()
x5=pd.date_range(time_start5, time_end5, periods=freq[4]).to_pydatetime()
#Plot Fischer Data in timerange x
fig3, ax3 = plt.subplots(5, 2, figsize=(12, 10))
fig3.suptitle('Experiments 1-5: Fischer F over Bootsensing daytime (left), Bootsensing cycle (right) ', fontsize=16)
ax3[0,0].plot(x1,f[0])
ax3[0,0].set(xlabel='daytime')
ax3[0,0].set(ylabel='F[N]')
ax3[0,0].set_title('Experiment 1')
ax3[1,0].plot(x2,f[1])
ax3[1,0].set(xlabel='daytime')
ax3[1,0].set(ylabel='F[N]')
ax3[1,0].set_title('Experiment 2')
ax3[2,0].plot(x3,f[2])
ax3[2,0].set(xlabel='daytime')
ax3[2,0].set(ylabel='F[N]')
ax3[2,0].set_title('Experiment 3')
ax3[3,0].plot(x4,f[3])
ax3[3,0].set(xlabel='daytime')
ax3[3,0].set(ylabel='F[N]')
ax3[3,0].set_title('Experiment 4')
ax3[4,0].plot(x5,f[4])
ax3[4,0].set(xlabel='daytime')
ax3[4,0].set(ylabel='F[N]')
ax3[4,0].set_title('Experiment 5')
#for i in range(1,5): # Exp2-5
#ax3[i,1].plot(dt[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]],ds[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]])
#ax3[i,1].set(ylabel='strain3')
#ax3[i,1].set(xlabel='daytime')
ax3[0,1].plot(dt[1][ds_peaks[5,1]:ds_peaks[4,1]],ds[1][ds_peaks[5,1]:ds_peaks[4,1]]) # special for Exp1 with peaks
ax3[0,1].set(xlabel='daytime')
ax3[0,1].set(ylabel='4A')
ax3[1,1].plot(dt[2][ds_valleys[5,2]:ds_valleys[4,2]],ds[2][ds_valleys[5,2]:ds_valleys[4,2]]) #Exp2 with valleys
ax3[1,1].set(xlabel='daytime')
ax3[1,1].set(ylabel='3A')
ax3[2,1].plot(dt[3][ds_peaks[5,3]:ds_peaks[4,3]],ds[3][ds_peaks[5,3]:ds_peaks[4,3]]) #Exp3 with peaks
ax3[2,1].set(xlabel='daytime')
ax3[2,1].set(ylabel='2B')
ax3[3,1].plot(dt[4][ds_valleys[5,4]:ds_valleys[4,4]],ds[4][ds_valleys[5,4]:ds_valleys[4,4]]) # Exp4 with valley
ax3[3,1].set(xlabel='daytime')
ax3[3,1].set(ylabel='1A')
ax3[4,1].plot(dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]) #Exp5 with peaks
ax3[4,1].set(xlabel='daytime')
ax3[4,1].set(ylabel='3B')
plt.tight_layout()
fig3.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In the graphs of the Fischer data (left side) you can notice a little kink during unloading as well as during loading. In experiment 5 (TLT) the kink is much more prominent.
# ATTENTION: As we verified the spacing between neighbouring valleys as well as neighbouring peaks in our bootsensing data, we can confirm the frequency of f = 0.33 Hz applied by the machine (see plots below).
# ### Time delta Fischer&bootsensing
# Now we're going to identify the extrema of the Fischer force data and of our bootsensing strain data for each single experiment 1-5. As we applied the same timespan on the x-axis of both plots, we can compare the x-coordinate of the left plot with the corresponding right one to check the response time (time delay) of our bootsensing system (i.e. the reaction time of the strain gauges).
# In[66]:
# Find extrema in Fischer F for Exp 1-5 in last cycle
inv_f={} # inverse of F
valleys_f={} # valleys in Fischer F
fmin={} # f for extrema
for i in range(5): # find extrema (in this case valley)
inv_f[i]=f[i]*(-1) # inverse of f
valleys_f[i],_=find_peaks(inv_f[i],prominence=10) # find valleys
fmin[i]=f[i][valleys_f[i]] # y-coordinate for minima
# x-coordinate for minima
x1min=x1[valleys_f[0]] #Exp1
x2min=x2[valleys_f[1]] #Exp2
x3min=x3[valleys_f[2]] #Exp3
x4min=x4[valleys_f[3]] #Exp4
x5min=x5[valleys_f[4]] #Exp5
# Find extrema in bootsensing data for Exp 1-5 in last cycle
# extract time and strain for last cycle Exp1-5 (manually)
t1=dt[1][ds_peaks[5,1]:ds_peaks[4,1]] # Exp1 -> valley
t1=t1.reset_index(drop=True) # reset index
ds1=ds[1][ds_peaks[5,1]:ds_peaks[4,1]]
ds1=ds1.reset_index(drop=True)
t2=dt[2][ds_valleys[5,2]:ds_valleys[4,2]] # Exp2 -> peak
t2=t2.reset_index(drop=True)
ds2=ds[2][ds_valleys[5,2]:ds_valleys[4,2]]
ds2=ds2.reset_index(drop=True)
t3=dt[3][ds_peaks[5,3]:ds_peaks[4,3]] # Exp3 -> valley
t3=t3.reset_index(drop=True)
ds3=ds[3][ds_peaks[5,3]:ds_peaks[4,3]]
ds3=ds3.reset_index(drop=True)
t4=dt[4][ds_valleys[5,4]:ds_valleys[4,4]] # Exp4 -> peak
t4=t4.reset_index(drop=True)
ds4=ds[4][ds_valleys[5,4]:ds_valleys[4,4]]
ds4=ds4.reset_index(drop=True)
t5=dt[5][ds_peaks[5,5]:ds_peaks[4,5]] # Exp5 -> valley
t5=t5.reset_index(drop=True)
ds5=ds[5][ds_peaks[5,5]:ds_peaks[4,5]]
ds5=ds5.reset_index(drop=True)
# Find valley for Exp1,3,5
valley_ds1,_=find_peaks(ds1*(-1)) # Exp1
valley_ds3,_=find_peaks(ds3*(-1)) # Exp3
valley_ds5,_=find_peaks(ds5*(-1)) # Exp5
# Find peak for Exp2,4
peak_ds2,_=find_peaks(ds2) # Exp2
peak_ds4,_=find_peaks(ds4) # Exp4
# Apply extrema index on x-coordinate of bootsensing for Exp1-5
t1ext=t1[valley_ds1].dt.to_pydatetime() # converting in same format as xmin
t2ext=t2[peak_ds2].dt.to_pydatetime()
t3ext=t3[valley_ds3].dt.to_pydatetime()
t4ext=t4[peak_ds4].dt.to_pydatetime()
t5ext=t5[valley_ds5].dt.to_pydatetime()
#Calculating timedelta in format to_pydatetime()
deltat1=t1ext-x1min
deltat2=t2ext-x2min
deltat3=t3ext-x3min
deltat4=t4ext-x4min
deltat5=t5ext-x5min
print(deltat1,deltat2,deltat3,deltat4,deltat5)
# If we look at the timedelta for Exp1-5 we see that we are in the range of deltat = 0.007678 s - 0.1669 s. For the setup this is enough at the moment. By increasing the data acquisition frequency we could probably improve this time delta.
# As we know that the machine applied the load with a frequency of f = 0.33 Hz, with T = 1/f we can calculate the duration of one loading cycle. Using the vector length of the Fischer force data we can then plot the force over time for each single cycle.
# In[67]:
fm=0.33 # frequency in Hz (preset)
T=1/fm #calculate time period T
fd={}
for i in range(5):
fd[i]= len(f[i])
freq=fd[0] #as all fd[i] have the same length we choose f[0]
x = np.linspace(0, T, freq, endpoint=False)
#Plot
fig4, ax4 = plt.subplots(5, 1, figsize=(6, 8))
fig4.suptitle('Experiments 1-5: Fischer F over time t ', fontsize=16)
for i in range(5):
ax4[i].plot(x,f[i])
    ax4[i].set(xlabel='time [s]')
ax4[i].set(ylabel='F[N]')
ax4[i].set_title('Experiment '+str(i+1))
plt.tight_layout()
fig4.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In[68]:
# Plot an example experiment with peaks and valleys for thesis
fig5, ax5 = plt.subplots(1, figsize=(15, 8))
#fig5.subplots_adjust(top=2)
#fig5.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
ax5.plot(df[df["exp"] == (3)]['time'], df[df["exp"] == (3)]['strain3'], 'tab:blue',label='strain gauge 2b')
ax5.plot(dt[(3)][peaks[(3)]],ds[(3)][peaks[(3)]],"rx",label='peak') #Plot peaks with x
ax5.plot(dt[(3)][valleys[(3)]],ds[(3)][valleys[(3)]],"ro",label='valley') #Plot valleys with o
ax5.set(ylabel='raw signal')
ax5.set(xlabel='daytime')
ax5.set_title('Cyclic loading of TLT Speedfit')
ax5.legend()
plt.tight_layout()
fig5.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# # Machine force and strain data matching
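# Matching idea (sketch of what the code below does): the machine force samples are mapped onto a millisecond-precision time axis spanning the last bootsensing cycle (x1, x5), so machine and strain samples can be joined wherever their timestamps coincide via pd.merge on the 'time' column; the strain timestamps shifted by 1 ms (data_splus*) should additionally catch machine samples that land 1 ms after a strain sample.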
# In[69]:
from datetime import timedelta
# Select strain 4A (stored in strain3) and machine data for Experiment 1
data_s1=pd.concat([dt[1][ds_peaks[5,1]:ds_peaks[4,1]], ds[1][ds_peaks[5,1]:ds_peaks[4,1]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Select strain 3C (stored in strain3) and machine data for Experiment 5
data_s5C=pd.concat([dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Convert machine time to DataFrame in ms precision
x1=pd.DataFrame(x1,columns=['time']).astype('datetime64[ms]') # Experiment 1
x5=pd.DataFrame(x5,columns=['time']).astype('datetime64[ms]') # Experiment 5
# Convert machine force data to DataFrame
f1=pd.DataFrame(f[0],columns=['force [N]']) # Experiment 1
f5=pd.DataFrame(f[4],columns=['force [N]']) # Experiment 5
# Make one dataframe with machine time and force
data_m1=pd.concat([x1,f1],axis=1)
data_m5C=pd.concat([x5,f5],axis=1)
# Create new time for data_s storing in data_splus1
d = timedelta(microseconds=1000)
data_snew1=[]
data_snew5=[]
for i in range(0,len(data_s1)): # Experiment 1
data_new1=data_s1.iloc[i,0]+d
data_snew1.append(data_new1)
for i in range(0,len(data_s5C)): # Experiment 5
data_new5=data_s5C.iloc[i,0]+d
data_snew5.append(data_new5)
data_splus11=pd.DataFrame(data_snew1,columns=['time']) # convert data_snew in DataFrame
data_splus12=pd.concat([data_splus11,data_s1['strain3']],axis=1) # concat data_s with data_splus1
data_splus51C=pd.DataFrame(data_snew5,columns=['time']) # convert data_snew in DataFrame
data_splus52C=pd.concat([data_splus51C,data_s5C['strain3']],axis=1) # concat data_s with data_splus1
# Data matching of strain 4A with corresponding force Experiment 1
data_match11=
|
pd.merge(data_s1, data_m1, on=['time'])
|
pandas.merge
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 04:44:55 2018
@author: <NAME>
"""
import pandas as pd
#reading datafile
df = pd.read_csv('..\WiFi-SLAM\E-house\wifiscanlog - 2016-02-09.csv', sep=',')
df.head()
#filtering
df = df[df.groupby('scanId').scanId.transform(len) > 4]
number_of_APs = df.ssid.value_counts().size;
number_of_scans = df.scanId.value_counts().size;
print('number of scans = ' + str(number_of_scans))
print('number of APs = ' + str(number_of_APs))
# first 3*number_of_scans entries represent the scan positions x, y, z
# then 3*number_of_APs entries represent the AP positions x, y, z
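# for illustration (index arithmetic only, using the names above):
#   scan k position: initial_guess[3*k : 3*k + 3]
#   AP j position:   initial_guess[3*number_of_scans + 3*j : 3*number_of_scans + 3*j + 3]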
initial_guess = []
#initial values x,y,z for scans positions
scan_dic = {}
unique_scans_df = df.drop_duplicates('scanId')
scanIds = list(unique_scans_df['scanId'])
X_scans = list(unique_scans_df['gpslatitude'])
Y_scans = list(unique_scans_df['gpslongitude'])
Z_scans = list(unique_scans_df['slamFloor'])
for i in range(len(scanIds)):
scan_dic[scanIds[i]] = len(initial_guess)
initial_guess.append(X_scans[i])
initial_guess.append(Y_scans[i])
initial_guess.append(Z_scans[i])
#initial values x,y,z for APs positions
APs_dic = {}
ssid = list(set([x for x, nan_value in zip(df['ssid'],
|
pd.isnull(df['ssid'])
|
pandas.isnull
|
import pandas as pd
import numpy as np
import random
import sys
import pickle
import sys
import math
import gc
import os
import warnings
sys.path.append("..")
warnings.filterwarnings("ignore")
from .strategy import Strategy
import time
import copy
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBClassifier
from torch.utils import data
from torchtools.optim import RangerLars
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from utils import timer_func
from torchtools.nn import Mish
class AttentionSampling(Strategy):
""" Attention based model """
def __init__(self, args):
super(AttentionSampling,self).__init__(args)
self.neighbor_k = 50
self.hidden_size = 32
self.batch_size = args.batch_size
self.epoch = args.epoch
def generate_metagraph(self):
column_for_feature = ['cif.value', 'total.taxes', 'gross.weight', 'quantity', 'Unitprice', 'WUnitprice', 'TaxRatio', 'TaxUnitquantity', 'SGD.DayofYear', 'SGD.WeekofYear', 'SGD.MonthofYear'] + [col for col in self.data.train_lab.columns if 'RiskH' in col]
self.data.column_for_feature = column_for_feature
self.data.train_lab[self.data.column_for_feature] = self.data.train_lab[self.data.column_for_feature].fillna(0)
self.data.train_unlab['illicit'] = 0.5
self.data.train_unlab[self.data.column_for_feature] = self.data.train_unlab[self.data.column_for_feature].fillna(0)
self.data.valid_lab[self.data.column_for_feature] = self.data.valid_lab[self.data.column_for_feature].fillna(0)
self.data.test[self.data.column_for_feature] = self.data.test[self.data.column_for_feature].fillna(0)
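        # The small XGBoost model below is used only to bucket records: xgb.apply() returns, for every
        # record, the index of the leaf it falls into in each of the 4 trees, and these leaf ids
        # (columns tree0..tree3, exposed as self.category) are the categorical features later handed to AttDataset.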
self.xgb = XGBClassifier(booster='gbtree', scale_pos_weight=1,
learning_rate=0.3, colsample_bytree=0.4,
subsample=0.6, objective='binary:logistic',
n_estimators=4, max_depth=10, gamma=10)
self.xgb.fit(self.data.train_lab[column_for_feature], self.data.train_cls_label)
X_train_leaves = self.xgb.apply(self.data.train_lab[column_for_feature])
X_train_unlab_leaves = self.xgb.apply(self.data.train_unlab[column_for_feature])
X_valid_leaves = self.xgb.apply(self.data.valid_lab[column_for_feature])
X_test_leaves = self.xgb.apply(self.data.test[column_for_feature])
self.data.train_lab[[f'tree{i}' for i in range(4)]] = X_train_leaves
self.data.train_unlab[[f'tree{i}' for i in range(4)]] = X_train_unlab_leaves
self.data.valid_lab[[f'tree{i}' for i in range(4)]] = X_valid_leaves
self.data.test[[f'tree{i}' for i in range(4)]] = X_test_leaves
self.category = [f'tree{i}' for i in range(4)]
def prepare_dataloader(self):
self.train_ds = AttDataset(self.data, self.data.train_lab, self.data.train_unlab, self.data.train_lab, self.neighbor_k, self.category)
self.valid_ds = AttDataset(self.data, self.data.train_lab, self.data.train_unlab, self.data.valid_lab, self.neighbor_k, self.category)
train_valid_all_df = pd.concat([self.data.train_lab, self.data.valid_lab])
self.test_ds = AttDataset(self.data, train_valid_all_df, self.data.train_unlab, self.data.test, self.neighbor_k, self.category)
self.train_loader = torch.utils.data.DataLoader(self.train_ds, batch_size=self.batch_size, num_workers=8, shuffle=True, drop_last=True)
self.valid_loader = torch.utils.data.DataLoader(self.valid_ds, batch_size=self.batch_size, num_workers=8, shuffle=False, drop_last=False)
self.test_loader = torch.utils.data.DataLoader(self.test_ds, batch_size=self.batch_size, num_workers=8, shuffle=False, drop_last=False)
def train_model(self):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.input_dim = len(self.data.column_to_use)+1
self.model = AttDetect(self.input_dim, self.hidden_size, self.category)
self.model.to(device)
optimizer = RangerLars(self.model.parameters(), lr=0.01, weight_decay=0.0001)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.8)
best_f1_top = 0
for epoch in range(self.epoch):
print("epoch: ", epoch)
self.model.train()
loss_avg = 0
for i, batch in enumerate(tqdm(self.train_loader)):
row_feature, neighbor_stack, row_target = batch
row_feature = row_feature.to(device)
neighbor_stack = neighbor_stack.to(device)
row_target = row_target.to(device)
loss, logits = self.model(row_feature, neighbor_stack, row_target)
loss_avg += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print('train_loss: ', loss_avg / len(self.train_loader))
# validation eval
self.model.eval()
with torch.no_grad():
logit_list = []
for i, batch in enumerate(tqdm(self.valid_loader)):
row_feature, neighbor_stack, row_target = batch
row_feature = row_feature.to(device)
neighbor_stack = neighbor_stack.to(device)
row_target = row_target.to(device)
loss, logits = self.model(row_feature, neighbor_stack, row_target)
logit_list.append(logits.reshape(-1, 1))
outputs = torch.cat(logit_list).detach().cpu().numpy().ravel()
f, pr, re = torch_metrics(np.array(outputs), self.valid_ds.target_Y.astype(int).to_numpy())
f1_top = np.mean(np.nan_to_num(f, nan = 0.0))
if f1_top > best_f1_top:
self.best_model = self.model
best_f1_top = f1_top
def predict_frauds(self):
""" Prediction for new dataset (test_model) """
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.best_model.eval()
logit_list = []
with torch.no_grad():
for i, batch in enumerate(tqdm(self.test_loader)):
row_feature, neighbor_stack, _ = batch
row_feature = row_feature.to(device)
neighbor_stack = neighbor_stack.to(device)
logits = self.model(row_feature, neighbor_stack)
logit_list.append(logits.reshape(-1, 1))
self.y_prob = torch.cat(logit_list).detach().cpu().numpy().ravel()
@timer_func
def query(self, k):
self.generate_metagraph()
self.prepare_dataloader()
self.train_model()
self.predict_frauds()
chosen = np.argpartition(self.y_prob[self.available_indices], -k)[-k:]
return self.available_indices[chosen].tolist()
class AttentionPlusRiskSampling(Strategy):
""" Attention + Discounted RiskMAB model """
def __init__(self, args):
super(AttentionPlusRiskSampling,self).__init__(args)
self.neighbor_k = 50
self.hidden_size = 32
self.batch_size = args.batch_size
self.epoch = args.epoch
self.decay = math.pow(0.9, (1/365))
def generate_metagraph(self):
column_for_feature = ['cif.value', 'total.taxes', 'gross.weight', 'quantity', 'Unitprice', 'WUnitprice', 'TaxRatio', 'TaxUnitquantity', 'SGD.DayofYear', 'SGD.WeekofYear', 'SGD.MonthofYear'] + [col for col in self.data.train_lab.columns if 'RiskH' in col]
self.data.column_for_feature = column_for_feature
self.data.train_lab[self.data.column_for_feature] = self.data.train_lab[self.data.column_for_feature].fillna(0)
self.data.train_unlab['illicit'] = 0.5
self.data.train_unlab[self.data.column_for_feature] = self.data.train_unlab[self.data.column_for_feature].fillna(0)
self.data.valid_lab[self.data.column_for_feature] = self.data.valid_lab[self.data.column_for_feature].fillna(0)
self.data.test[self.data.column_for_feature] = self.data.test[self.data.column_for_feature].fillna(0)
self.xgb = XGBClassifier(booster='gbtree', scale_pos_weight=1,
learning_rate=0.3, colsample_bytree=0.4,
subsample=0.6, objective='binary:logistic',
n_estimators=4, max_depth=10, gamma=10)
self.xgb.fit(self.data.train_lab[column_for_feature], self.data.train_cls_label)
X_train_leaves = self.xgb.apply(self.data.train_lab[column_for_feature])
X_train_unlab_leaves = self.xgb.apply(self.data.train_unlab[column_for_feature])
X_valid_leaves = self.xgb.apply(self.data.valid_lab[column_for_feature])
X_test_leaves = self.xgb.apply(self.data.test[column_for_feature])
self.data.train_lab[[f'tree{i}' for i in range(4)]] = X_train_leaves
self.data.train_unlab[[f'tree{i}' for i in range(4)]] = X_train_unlab_leaves
self.data.valid_lab[[f'tree{i}' for i in range(4)]] = X_valid_leaves
self.data.test[[f'tree{i}' for i in range(4)]] = X_test_leaves
self.category = [f'tree{i}' for i in range(4)]
def prepare_dataloader(self):
self.train_ds = AttDataset(self.data, self.data.train_lab, self.data.train_unlab, self.data.train_lab, self.neighbor_k, self.category)
self.valid_ds = AttDataset(self.data, self.data.train_lab, self.data.train_unlab, self.data.valid_lab, self.neighbor_k, self.category)
train_valid_all_df =
|
pd.concat([self.data.train_lab, self.data.valid_lab])
|
pandas.concat
|
############################
# written by <NAME> and <NAME>
############################
"""
record_history
There are three phases to record the training process history.
The result will be recorded in a dictionary named statistics.
phase 'initial': set up the dictionary statistics at the beginning
phase 'measure': called each time we want to record the data to the dictionary
phase 'save'   : save the dictionary when training is complete. We can modify it to also save the weights.
"""
import pandas as pd
import numpy as np
from util_func import *
def record_history (phase, number, X, Y, X_test, Y_test, w, lamb, record_path, record_name):
# Initialize phase:
if (phase == 'initial'):
global statistics
statistics = {
"number": [],
"loss": [],
"grad_loss": [],
"acc_train": [],
"acc_test": []
}
global num_train, num_feature, num_test
num_train, num_feature = np.shape(X)
num_test = len(Y_test)
# Measure phase:
if (phase == 'measure'):
# Evaluate -------------------------------------------------------------
train_loss = func_val(num_train, num_feature, X, Y, w, lamb)
full_grad = full_grad_eval(num_train, num_feature, X, Y, w, lamb)
gradient_norm_square = np.dot(full_grad, full_grad)
train_acc = accuracy(num_train, num_feature, X, Y, w, lamb)
test_acc = accuracy(num_test, num_feature, X_test, Y_test, w, lamb)
# Add statistics -------------------------------------------------------
print ('--- # %d Loss %.5f Grad Loss %.5f Train & Test Acc: %.2f %.2f' % (number, train_loss,
gradient_norm_square, train_acc, test_acc))
statistics ["number"].append(number)
statistics ["loss"].append(train_loss)
statistics ["grad_loss"].append(gradient_norm_square)
statistics ["acc_train"].append(train_acc)
statistics ["acc_test"].append(test_acc)
# Save phase:
if (phase == 'save'):
# Save the training process history
record_name = record_name + '.csv'
for key, value in statistics.items():
|
pd.DataFrame(value)
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
class Projection:
''' Read and Store Fangraphs Projection files in instance of class Projection.
Parameters
----------
path_data : string [optional]
Where the data is stored (before date)
model : string [optional]
Choose from ZiPS (default), Steamer, Fans
year: int [optional]
year of projections
Returns
-------
Instance of Projection, which contains:
Objects:
- self.statline
- self.hitters_rank
- self.pitchers_rank
- self.hitter_stats
- self.pitchers_stats
Functions:
- precompute_statlines
'''
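    # Hypothetical usage sketch (model/year are just example values):
    #   proj = Projection(model='ZiPS', year=2019)
    #   proj.hitters_stats.head()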
def __init__(self, model = 'ZiPS', year = 2019, path_data = '/data/baseball/Fangraphs/projections/'):
self.statline = {}
self.hitters_rank = {}
self.pitchers_rank= {}
self.hitters_stats = pd.DataFrame()
# Read in Batters by Position for Year and Position
for file in os.listdir(path_data+str(year)+'/'):
if file.startswith(model) & ~file.endswith('Hitters.csv') & ~file.endswith('Pitchers.csv') :
#print(os.path.join(path_data+str(year)+'/', file))
df = pd.read_csv(os.path.join(path_data+str(year)+'/', file), index_col = 'playerid')
fn = str.split(file,'.')[0][-2:]
if fn == '_C': fn = 'C'
#print fn
df['Position'] = fn
df['Rank'] = 999
if self.hitters_stats.empty:
self.hitters_stats = df
else:
self.hitters_stats =
|
pd.concat([self.hitters_stats, df])
|
pandas.concat
|
import pandas as pd
import os
import re
from datetime import datetime
EV_PATTERN = re.compile('^ev_dump_.*\.csv$', re.IGNORECASE)
DEVICE_SCHEMA = {
'timestamp': 'str',
'device_id': 'str',
'event_type': 'str',
'event_payload': 'str'
}
def __clean_timestamp(rawValue:str) -> datetime:
try:
if rawValue is not None:
actualVal = float(rawValue)
return datetime.fromtimestamp(actualVal)
except Exception:
pass
return None
def __clean_device_id(rawValue:str) -> str:
try:
if rawValue is not None and len(rawValue) == 8:
# parse as hex into decimal to confirm its a valid value
_ = int(rawValue, 16)
return rawValue.lower()
except Exception:
pass
return None
def __clean_event_type(rawValue:str) -> str:
if rawValue is None or len(rawValue) >= 256:
# unexpected value, lets skip it
return None
return rawValue.lower()
def get_ev_data(rootDir:str) -> pd.DataFrame:
"""Retrieves all EV data from the given folder and returns as a DataFrame."""
combinedDf = None
for fileName in os.listdir(rootDir):
absFilePath = os.path.join(rootDir, fileName)
if not os.path.isfile(absFilePath):
continue # assuming all files are within the given directory and not subdirectories.
if not re.match(EV_PATTERN, fileName):
# not an ev file - skip for now
continue
df = pd.read_csv(absFilePath, sep=',', skiprows=1, skip_blank_lines=True, header=None, names=DEVICE_SCHEMA.keys(), dtype=DEVICE_SCHEMA, on_bad_lines='warn')
df['timestamp'] = df['timestamp'].apply(__clean_timestamp)
df['device_id'] = df['device_id'].apply(__clean_device_id)
df['event_type'] = df['event_type'].apply(__clean_event_type)
df.dropna(inplace=True)
if combinedDf is None:
combinedDf = df
else:
combinedDf =
|
pd.concat([combinedDf, df])
|
pandas.concat
|
#https://github.com/pau-lo/Random-Forest-Classifier-for-Breast-Cancer-Prediction/blob/master/RF-(RandomForestClassifer)-Breast-Cancer-Prediction.ipynb
###### IMPORT MODULES #### ###
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, cross_val_score, validation_curve
from sklearn import linear_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sympy import *
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from scipy import interp
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
# dependencies for plotting
import matplotlib.pyplot as plt
from pylab import rcParams
import matplotlib as mpl
import seaborn as sns
# dependencies for statistic analysis
import statsmodels.api as sm
from scipy import stats
#importing our parameter tuning dependencies
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import (cross_val_score, GridSearchCV,
StratifiedKFold, ShuffleSplit )
#importing our dependencies for Feature Selection
from sklearn.feature_selection import (SelectKBest, chi2, RFE, RFECV)
from sklearn.linear_model import LogisticRegression, RandomizedLogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import ShuffleSplit
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from collections import defaultdict
# Importing our sklearn dependencies for the modeling
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import KFold
from sklearn import metrics
from sklearn.metrics import (accuracy_score, confusion_matrix,
classification_report, roc_curve, auc)
from sklearn.model_selection import cross_val_predict
import warnings
warnings.filterwarnings('ignore')
##read in the data
shared =
|
pd.read_table("data/baxter.0.03.subsample.shared")
|
pandas.read_table
|
import pandas as pd
import matplotlib.pyplot as plt
from ..utils.calc import *
class Radiosonde(pd.DataFrame):
"""A radiosonde object"""
def __init__(self, df, meta=None, ddz_smooth_window=10):
pd.DataFrame.__init__(self, index=df['Height'].values)
self.index.name = 'z'
setattr(self,'raw_df', df)
setattr(self, 'meta', meta)
# compact values for calculation
# loading variables to numpy array to easier computation
self['timestamp'] = df['timestamp'].values
self['z'] = df['Height'].values
self['U'] = df['WindSpeed'].values
self['UDir'] = df['WindDir'].values
self['v'] = df['WindNorth'].values
self['u'] = df['WindEast'].values
self['T_K'] = df['Temperature'].values
self['T'] = self['T_K'] - 273.15
self['P'] = df['Pressure'].values
self['RH'] = df['Humidity'].values
self['es'] = 6.11 * 10 ** (7.5 * (self['T']) / (self['T'] + 237.3)) # sat vapor pres
self['e'] = self['RH'] * self['es'] / 100 # vapor pres
self['r'] = 0.622 * self['e'] / self['P'] # mixing ratio (1000 to convert to kgkg-1)
self['q'] = self['r'] / (1 + self['r'])
self['T_v_K'] = self['T_K'] * (1 + 0.61 * self['q'])
self['T_v'] = self['T_v_K'] - 273.15
# moist static energy (no contribution from water droplets)
# Note: only 1.2% drop of g at 40km, so g taken as constant
self['mse'] = Cpd * self['T_K'] + g * self['z'] \
+ Lw(self['T']) * self['q']
# potential temperature for moist air
self['theta_v'] = self['T_K'] * (1000/self['P']) \
** (0.2854 * (1-0.28e-3 * self['r'] * 1000))
# dew point temperature
self['T_D'] = (237.3 * np.log10(self['e']/6.11)) \
/ (7.5 - np.log10(self['e']/6.11))
self['T_DK'] = self['T_D'] + 273.15
self._compute_theta_e()
self._compute_ddz_vars(ddz_smooth_window=ddz_smooth_window)
# equivalent potential temperature
## Computed with iterative calc on dew point and pressure
def _compute_theta_e(self):
'''
Compute equivalent potential temperature with bolton (1980) method"
'''
T_DK = self['T_DK']
T_K = self['T_K']
r = self['r']
T_LK = 1 / (1 / (T_DK - 56) + np.log(T_K/T_DK)/800) + 56
P = self['P']
theta_e = T_K * (1000/P) ** (0.2854 * (1 - 0.28e-3 * r)) \
* np.exp( (3.376 / T_LK - 0.00254) * r * (1 + 0.81e-3 * r))
self['T_LK'] = T_LK
self['theta_e'] = theta_e
def _compute_ddz_vars(self, ddz_smooth_window=10):
'''
Compute vertical buoyancy gradient, velocity gradient and gradient
Richardson number
'''
        self['dTheta_edz'] = self._ddz('theta_e', ddz_smooth_window=ddz_smooth_window)
        self['dTheta_vdz'] = self._ddz('theta_v', ddz_smooth_window=ddz_smooth_window)
        self['dUdz'] = self._ddz('U', ddz_smooth_window=ddz_smooth_window)
self['dbdz'] = g / self['theta_e'] * self['dTheta_edz']
self['Ri_g'] = (g / self['T_v_K'] * self['dTheta_vdz']) \
/ self['dUdz'] ** 2
def _ddz(self, varName, grad_scheme=gradx, ddz_smooth_window=10):
"""Compute vertical gradient of a given variable"""
ser_tmp_sm = self[varName].rolling(ddz_smooth_window).mean()
dudz, z_grad = grad_scheme(ser_tmp_sm.values, ser_tmp_sm.index.values)
return
|
pd.Series(dudz, index=z_grad)
|
pandas.Series
|
import pypipegraph as ppg
import mbf_genomes
import collections
import cutadapt
import cutadapt.align
import pandas as pd
from pathlib import Path
from .util import read_fastq_iterator, get_fastq_iterator
from typing import Callable, List, Union
from pypipegraph import Job
AdapterMatch = collections.namedtuple(
"AdapterMatch", ["astart", "astop", "rstart", "rstop", "matches", "errors"]
)
class CutadaptMatch:
def __init__(
self,
adapter_start: str,
adapter_end: str,
cutadapt_error_rate: int = 0,
min_overlap: int = 3,
) -> None:
self.adapter_sequence_begin = adapter_start
self.adapter_sequence_end = adapter_end
self.maximal_error_rate = cutadapt_error_rate
self.min_overlap = min_overlap
self.adapter_sequence_begin_reverse = mbf_genomes.common.reverse_complement(
self.adapter_sequence_begin
)
self.adapter_sequence_end_reverse = mbf_genomes.common.reverse_complement(
self.adapter_sequence_end
)
self.where_fwd = (
cutadapt.align.START_WITHIN_SEQ2
| cutadapt.align.START_WITHIN_SEQ1
# | cutadapt.align.STOP_WITHIN_SEQ2
| cutadapt.align.STOP_WITHIN_SEQ1
)
self.where_rev = (
cutadapt.align.START_WITHIN_SEQ2
| cutadapt.align.STOP_WITHIN_SEQ2
| cutadapt.align.STOP_WITHIN_SEQ1
)
self.adapters = {}
for aname, asequence, where in [
("adapter_sequence_begin", self.adapter_sequence_begin, self.where_fwd),
(
"adapter_sequence_begin_reverse",
self.adapter_sequence_begin_reverse,
self.where_fwd,
),
("adapter_sequence_end", self.adapter_sequence_end, self.where_rev),
(
"adapter_sequence_end_reverse",
self.adapter_sequence_end_reverse,
self.where_rev,
),
]:
alen = len(asequence)
if isinstance(asequence, str) and alen > 0:
adapter = cutadapt.align.Aligner(
asequence if asequence else "",
self.maximal_error_rate / len(asequence),
# where,
wildcard_ref=True,
wildcard_query=False,
min_overlap=self.min_overlap,
)
else:
adapter = None
self.adapters[aname] = adapter
def match(self, adapter, seq):
alignment = adapter.locate(seq)
if alignment is None:
return None
_match = AdapterMatch(*alignment)
return _match
def filter(self):
def filter_func(seq1, qual1, name1, seq2, qual2, name2):
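            # filter_func works on one read pair: the mate carrying the forward begin-adapter is moved to
            # position 1 (swapping if necessary), begin/end adapter hits are trimmed from both mates, and
            # the trimmed pair is returned, or None if the pair should be discarded.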
seq1 = seq1.decode()
seq2 = seq2.decode()
match_begin_fwd1 = self.match(self.adapters["adapter_sequence_begin"], seq1)
match_begin_fwd2 = self.match(self.adapters["adapter_sequence_begin"], seq2)
if match_begin_fwd1 is None and match_begin_fwd2 is None:
# forward adapter nowhere to be found, discard
# print(name1)
# print(self.adapters["adapter_sequence_begin"])
# print(seq1)
# print(seq2)
# print("<<match_begin_fwd1<<", match_begin_fwd1)
# print("here0")
# raise ValueError()
return None
elif match_begin_fwd1 is None:
# forward adapter is in read2
seq1, qual1, seq2, qual2 = seq2, qual2, seq1, qual1
match_begin_fwd = match_begin_fwd2
elif match_begin_fwd2 is None:
match_begin_fwd = match_begin_fwd1
else:
# both not none, take the best fit
if match_begin_fwd1.errors <= match_begin_fwd2.errors:
match_begin_fwd = match_begin_fwd1
else:
match_begin_fwd = match_begin_fwd2
seq1, qual1, seq2, qual2 = seq2, qual2, seq1, qual1
i1 = match_begin_fwd.rstop
# adapter_begin forward found
match_end_fwd = self.match(
self.adapters["adapter_sequence_end_reverse"],
mbf_genomes.common.reverse_complement(seq1),
)
if match_end_fwd is not None:
# adapter_end forward found
print("<<match_end_fwd<<", match_end_fwd)
i2 = len(seq1) - match_end_fwd.rstop
else:
i2 = len(seq1)
# now the second read must have the reverse adapters
match_end_rev = self.match(
self.adapters["adapter_sequence_end_reverse"], seq2
)
# print("j1", self.adapter_sequence_end_reverse, match_end_rev)
if match_end_rev is not None:
j1 = match_end_rev.rstop
match_begin_rev = self.match(
self.adapters["adapter_sequence_begin"],
mbf_genomes.common.reverse_complement(seq2),
)
if match_begin_rev is None:
j2 = len(seq2)
else:
j2 = len(seq2) - match_begin_rev.rstop # match_begin_rev.rstart
else:
# reverse read is not matching, discard
# print("here1")
return None
s1 = seq1[i1:i2]
q1 = qual1[i1:i2]
s2 = seq2[j1:j2]
q2 = qual2[j1:j2]
if s1 == "" or s2 == "":
print(seq1)
print(seq2)
print("----------")
print(s1)
print(s2)
print(i1, i2, j1, j2)
# raise ValueError()
print("here2")
return None
return (s1.encode(), q1, name1, s2.encode(), q2, name2)
return filter_func
def trim_and_sort(self):
def trim_func(seq1, qual1, name1, seq2, qual2, name2):
seq1 = seq1.decode()
seq2 = seq2.decode()
            match_begin_fwd1 = self.match(self.adapters["adapter_sequence_begin"], seq1)
match_begin_fwd2 = self.match(self.adapters["adapter_sequence_begin"], seq2)
if match_begin_fwd1 is None and match_begin_fwd2 is None:
# forward adapter nowhere to be found, discard
print(self.adapters["adapter_sequence_begin"])
print(seq1)
print("<<match_begin_fwd1<<", match_begin_fwd1)
print("here0")
return None
elif match_begin_fwd1 is None:
# forward adapter is in read2
seq1, qual1, seq2, qual2 = seq2, qual2, seq1, qual1
match_begin_fwd = match_begin_fwd2
elif match_begin_fwd2 is None:
match_begin_fwd = match_begin_fwd1
else:
# both not none, take the best fit
if match_begin_fwd1.errors <= match_begin_fwd2.errors:
match_begin_fwd = match_begin_fwd1
else:
match_begin_fwd = match_begin_fwd2
seq1, qual1, seq2, qual2 = seq2, qual2, seq1, qual1
i1 = match_begin_fwd.rstop
# adapter_begin forward found
match_end_fwd = self.match(
self.adapters["adapter_sequence_end_reverse"],
mbf_genomes.common.reverse_complement(seq1),
)
if match_end_fwd is not None:
# adapter_end forward found
print("<<match_end_fwd<<", match_end_fwd)
i2 = len(seq1) - match_end_fwd.rstop
else:
i2 = len(seq1)
# now the second read must have the reverse adapters
match_end_rev = self.match(
self.adapters["adapter_sequence_end_reverse"], seq2
)
# print("j1", self.adapter_sequence_end_reverse, match_end_rev)
if match_end_rev is not None:
j1 = match_end_rev.rstop
match_begin_rev = self.match(
self.adapters["adapter_sequence_begin"],
mbf_genomes.common.reverse_complement(seq2),
)
if match_begin_rev is None:
j2 = len(seq2)
else:
j2 = len(seq2) - match_begin_rev.rstop # match_begin_rev.rstart
else:
# reverse read is not matching, discard
print("here")
return None
s1 = seq1[i1:i2]
q1 = qual1[i1:i2]
s2 = seq2[j1:j2]
q2 = qual2[j1:j2]
print(seq1)
print(seq2)
print("----------")
print(s1)
print(s2)
print(i1, i2, j1, j2)
# raise ValueError()
if s1 == "" or s2 == "":
print("here2")
return None
print(i1, i2, j1, j2)
print((s1.encode(), q1, name1, s2.encode(), q2, name2))
raise ValueError()
return (s1.encode(), q1, name1, s2.encode(), q2, name2)
        return trim_func
def filter_single(self) -> Callable:
"""
filter_single returns a filter function that filters paired end reads to a single read.
filter_single returns a filter function that takes paired end reads,
        identifies the read that contains the forward/reverse adapter sequences, trims those sequences,
        and returns this single mate trimmed at the adapter occurrences.
Returns
-------
Callable
Filter function for fastqw processor.
"""
def filter_func(seq1, qual1, name1, seq2, qual2, name2):
seq1 = seq1.decode()
seq2 = seq2.decode()
match_begin_fwd1 = self.match(self.adapters["adapter_sequence_begin"], seq1)
match_begin_fwd2 = self.match(self.adapters["adapter_sequence_begin"], seq2)
if match_begin_fwd1 is None and match_begin_fwd2 is None:
# forward adapter nowhere to be found, discard
return None
elif match_begin_fwd1 is None:
# forward adapter is in read2, switch the reads
seq1, qual1, name1, seq2, qual2, name2 = (
seq2,
qual2,
name2,
seq1,
qual1,
name1,
)
match_begin_fwd = match_begin_fwd2
match_end_fwd = self.match(self.adapters["adapter_sequence_end"], seq2)
elif match_begin_fwd2 is None:
# forward adapter is in first read
match_begin_fwd = match_begin_fwd1
match_end_fwd = self.match(self.adapters["adapter_sequence_end"], seq1)
else:
# both not none, check the end
match_end_fwd1 = self.match(self.adapters["adapter_sequence_end"], seq1)
match_end_fwd2 = self.match(self.adapters["adapter_sequence_end"], seq2)
if match_end_fwd1 is None and match_end_fwd2 is not None:
# take the second, switch the reads
seq1, qual1, name1, seq2, qual2, name2 = (
seq2,
qual2,
name2,
seq1,
qual1,
name1,
)
match_end_fwd = match_end_fwd2
match_begin_fwd = match_begin_fwd2
else:
# take the first, as it contains the end adapter
match_end_fwd = match_end_fwd1
match_begin_fwd = match_begin_fwd1
i1 = match_begin_fwd.rstop
# adapter_begin forward found
if match_end_fwd is not None:
# adapter_end forward found
i2 = match_end_fwd.rstart
else:
i2 = len(seq1)
s1 = seq1[i1:i2]
q1 = qual1[i1:i2]
if s1 == "":
return None
return (s1.encode(), q1, name1)
return filter_func
def count_adapter_occurences(
self,
r1,
r2,
output_file,
max=100000,
dependencies=[],
):
if isinstance(output_file, str):
outfile = Path(output_file)
outfile.parent.mkdir(parents=True, exist_ok=True)
def __dump():
iter1 = get_fastq_iterator(r1)
iter2 = get_fastq_iterator(r2)
counter = collections.Counter()
examples = {}
count = 0
for tup in zip(iter1, iter2):
print(tup)
seq1, name1, _ = tup[0]
seq2, name2, _ = tup[1]
# seq1 = seq1.decode()
# seq2 = seq2.decode()
match_begin_fwd1 = (
self.match(self.adapters["adapter_sequence_begin"], seq1)
is not None
)
match_begin_fwd2 = (
self.match(self.adapters["adapter_sequence_begin"], seq2)
is not None
)
match_end_fwd1 = (
self.match(self.adapters["adapter_sequence_end"], seq1) is not None
)
match_end_fwd2 = (
self.match(self.adapters["adapter_sequence_end"], seq2) is not None
)
match_begin_rev1 = (
self.match(self.adapters["adapter_sequence_begin_reverse"], seq1)
is not None
)
match_begin_rev2 = (
self.match(self.adapters["adapter_sequence_begin_reverse"], seq2)
is not None
)
match_end_rev1 = (
self.match(self.adapters["adapter_sequence_end_reverse"], seq1)
is not None
)
match_end_rev2 = (
self.match(self.adapters["adapter_sequence_end_reverse"], seq2)
is not None
)
key = (
match_begin_fwd1,
match_begin_fwd2,
match_end_fwd1,
match_end_fwd2,
match_begin_rev1,
match_begin_rev2,
match_end_rev1,
match_end_rev2,
)
counter[key] += 1
if key not in examples:
examples[key] = (seq1, seq2, name1, name2)
count += 1
if count >= max:
break
to_df = {
"Begin_forward_1": [],
"Begin_forward_2": [],
"End_forward_1": [],
"End_forward_2": [],
"Begin_reverse_1": [],
"Begin_reverse_2": [],
"End_reverse_1": [],
"End_reverse_2": [],
"Count": [],
"Example": [],
}
for key in counter:
to_df["Begin_forward_1"].append(key[0])
to_df["Begin_forward_2"].append(key[1])
to_df["End_forward_1"].append(key[2])
to_df["End_forward_2"].append(key[3])
to_df["Begin_reverse_1"].append(key[4])
to_df["Begin_reverse_2"].append(key[5])
to_df["End_reverse_1"].append(key[6])
to_df["End_reverse_2"].append(key[7])
to_df["Count"].append(counter[key])
to_df["Example"].append(examples[key])
df = pd.DataFrame(to_df)
df.to_csv(output_file, sep="\t", index=False)
return ppg.FileGeneratingJob(outfile, __dump).depends_on(dependencies)
@staticmethod
def count_most_common_sequences(
r1: Union[str, Path],
r2: Union[str, Path],
output_file: Union[str, Path],
max: int = 100000,
dependencies: List[Job] = [],
):
if isinstance(output_file, str):
outfile = Path(output_file)
outfile.parent.mkdir(parents=True, exist_ok=True)
def __dump():
iter1 = get_fastq_iterator(r1)
iter2 = get_fastq_iterator(r2)
counter = collections.Counter()
examples = {}
count = 0
for tup in zip(iter1, iter2):
seq1, name1, _ = tup[0]
seq2, name2, _ = tup[1]
key = (seq1, seq2)
counter[key] += 1
if key not in examples:
examples[key] = (name1, name2)
count += 1
if count >= max:
break
to_df = {
"Seq1": [],
"Seq2": [],
"Count": [],
"Example": [],
}
for key in counter:
to_df["Seq1"].append(key[0])
to_df["Seq2"].append(key[1])
to_df["Count"].append(counter[key])
to_df["Example"].append(examples[key])
df =
|
pd.DataFrame(to_df)
|
pandas.DataFrame
|
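# A minimal, self-contained sketch of the pattern used in the snippet above:
# accumulate per-key counts into a dict of lists, wrap it in a DataFrame, and
# write it out as TSV. The sequences and counts below are illustrative only.
import collections
import pandas as pd
counter = collections.Counter({("ACGT", "TTTT"): 3, ("GGGG", "ACGT"): 1})
to_df = {"Seq1": [], "Seq2": [], "Count": []}
for (seq1, seq2), count in counter.items():
    to_df["Seq1"].append(seq1)
    to_df["Seq2"].append(seq2)
    to_df["Count"].append(count)
df = pd.DataFrame(to_df)
df.to_csv("most_common_sequences.tsv", sep="\t", index=False)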
import pandas as pd
import pandas.api.types as types
from Levenshtein.StringMatcher import distance
""" Find and replace """
def find_replace(dataframe: pd.DataFrame, column_name: str, to_replace, value) -> pd.DataFrame:
dataframe[column_name].replace(to_replace, value, inplace=True)
return dataframe
def find_replace_regex(dataframe: pd.DataFrame, column_name: str, to_replace: str, value: str) -> pd.DataFrame:
if types.is_string_dtype(dataframe[column_name]):
dataframe[column_name].replace(to_replace, value, inplace=True, regex=True)
return dataframe
def find_replace_all(dataframe: pd.DataFrame, to_replace, value) -> pd.DataFrame:
dataframe.replace(to_replace, value, inplace=True)
return dataframe
""" Normalization """
def normalize(dataframe: pd.DataFrame, column_name: str) -> pd.DataFrame:
col = dataframe[column_name]
if not types.is_numeric_dtype(col):
return dataframe
dataframe[column_name] = (col - col.min()) / (col.max() - col.min())
return dataframe
def normalize_all(dataframe: pd.DataFrame) -> pd.DataFrame:
func = {False: lambda col: col,
True: lambda col: (col - col.min()) / (col.max() - col.min())}
return dataframe.transform(lambda col: func[types.is_numeric_dtype(dataframe[col.name])](col))
""" Outliers """
def remove_outliers(dataframe: pd.DataFrame, column_name: str, outside_range: float) -> pd.DataFrame:
col = dataframe[column_name]
if not types.is_numeric_dtype(col):
return dataframe
return dataframe[(col - col.mean()).abs() <= (col.std() * outside_range)]
def remove_all_outliers(dataframe: pd.DataFrame, outside_range: float) -> pd.DataFrame:
return dataframe[dataframe.apply(lambda col:
not types.is_numeric_dtype(dataframe[col.name])
or (col - col.mean()).abs() <= (outside_range * col.std())).all(axis=1)]
""" Empty fields """
def fill_empty_mean(dataframe: pd.DataFrame, column_name: str) -> pd.DataFrame:
if not types.is_numeric_dtype(dataframe[column_name]):
return dataframe
dataframe[column_name] = dataframe[column_name].fillna(dataframe[column_name].mean())
return dataframe
def fill_empty_median(dataframe: pd.DataFrame, column_name: str) -> pd.DataFrame:
if not types.is_numeric_dtype(dataframe[column_name]):
return dataframe
dataframe[column_name] = dataframe[column_name].fillna(dataframe[column_name].median())
return dataframe
def fill_empty_value(dataframe: pd.DataFrame, column_name: str, value) -> pd.DataFrame:
dataframe[column_name] = dataframe[column_name].fillna(value)
return dataframe
def fill_all_empty_mean(dataframe: pd.DataFrame) -> pd.DataFrame:
func = {False: lambda col: col,
True: lambda col: col.fillna(col.mean())}
return dataframe.apply(lambda col: func[types.is_numeric_dtype(dataframe[col.name])](col))
def fill_all_empty_median(dataframe: pd.DataFrame) -> pd.DataFrame:
func = {False: lambda col: col,
True: lambda col: col.fillna(col.median())}
return dataframe.apply(lambda col: func[types.is_numeric_dtype(dataframe[col.name])](col))
""" Discretization """
def discretize_equiwidth(dataframe: pd.DataFrame, column_name: str, nr_bins: int) -> pd.DataFrame:
if types.is_numeric_dtype(dataframe[column_name]):
dataframe[column_name] = pd.cut(dataframe[column_name], nr_bins).apply(str)
return dataframe
def discretize_equifreq(dataframe: pd.DataFrame, column_name: str, nr_bins: int) -> pd.DataFrame:
if types.is_numeric_dtype(dataframe[column_name]):
dataframe[column_name] = pd.qcut(dataframe[column_name], nr_bins).apply(str)
return dataframe
def discretize_ranges(dataframe: pd.DataFrame, column_name: str, boundaries: [float]) -> pd.DataFrame:
col = dataframe[column_name]
if types.is_numeric_dtype(col):
boundaries.sort()
if boundaries[0] < col.min() or boundaries[len(boundaries) - 1] > col.max():
return dataframe
boundaries.insert(0, col.min() - (col.max() - col.min()) / 1000)
boundaries.insert(len(boundaries), col.max())
dataframe[column_name] = pd.cut(col, boundaries).apply(str)
return dataframe
""" One-hot-encoding """
def one_hot_encode(dataframe: pd.DataFrame, column_name: str, use_old_name: bool) -> pd.DataFrame:
if types.is_string_dtype(dataframe[column_name]):
encoded_frame = pd.get_dummies(dataframe[column_name])
index = dataframe.columns.tolist().index(column_name)
dataframe.drop(labels=column_name, axis=1, inplace=True)
for i in range(encoded_frame.shape[1]):
column = encoded_frame[encoded_frame.columns[i]]
if use_old_name:
dataframe.insert(loc=index + i, column=column_name + '_' + encoded_frame.columns[i], value=column)
else:
dataframe.insert(loc=index + i, column=encoded_frame.columns[i], value=column)
return dataframe
""" Type changing """
def change_type(dataframe: pd.DataFrame, column_name: str, new_type: str) -> pd.DataFrame:
col = dataframe[column_name]
if new_type == 'string':
dataframe[column_name] = col.astype(str)
elif new_type == 'int':
dataframe[column_name] = col.astype(float).transform(round).astype(int)
elif new_type == 'float':
dataframe[column_name] = col.astype(float)
elif new_type == 'datetime':
dataframe[column_name] = col.apply(pd.to_datetime)
return dataframe
""" Extract from date/time """
def extract_from_datetime(dataframe: pd.DataFrame, column_name: str, to_extract: str) -> pd.DataFrame:
col = dataframe[column_name]
if types.is_datetime64_any_dtype(col):
if to_extract == 'year':
dataframe[column_name] = col.dt.year.astype(int, errors='ignore')
elif to_extract == 'month':
dataframe[column_name] = col.dt.month.astype(int, errors='ignore')
elif to_extract == 'week':
dataframe[column_name] = col.dt.weekofyear.astype(int, errors='ignore')
elif to_extract == 'day':
dataframe[column_name] = col.dt.day.astype(int, errors='ignore')
elif to_extract == 'weekday':
dataframe[column_name] = col.dt.day_name()
return dataframe
""" Data deduplication """
def find_duplicates(dataframe: pd.DataFrame, column_name: str, threshold: int) -> dict:
col: pd.Series = dataframe[column_name]
if not types.is_string_dtype(col):
return {}
"""
    Find all possible duplicates for each string in the column, and count for each string the number of occurrences
"""
duplicates: {str, {str}} = {}
occurences: {str, int} = {}
distances_sum: {str, int} = {}
for i in range(col.size):
str_1: str = col[i]
# Skip if str_1 has been encountered already
if str_1 in occurences:
continue
occurences[str_1] = 1
for j in range(i + 1, col.size):
str_2: str = col[j]
            # Skip and increase the occurrence counter if str_2 is the same as str_1
if str_1 == str_2:
occurences[str_1] += 1
continue
# Skip if str_2 has been encountered already
if str_2 in occurences:
continue
            # Only use pairs whose edit distance is at or below the threshold
edit_distance: int = distance(str_1, str_2)
if edit_distance <= threshold:
# Initialize the duplicate lists if necessary
if str_1 not in duplicates:
duplicates[str_1] = {str_1}
distances_sum[str_1] = 0
if str_2 not in duplicates:
duplicates[str_2] = {str_2}
distances_sum[str_2] = 0
                # Add str_1 and str_2 to each other's duplicate sets
duplicates[str_1].add(str_2)
duplicates[str_2].add(str_1)
distances_sum[str_1] += edit_distance
distances_sum[str_2] += edit_distance
"""
Find the most probable duplicate for each string
"""
neighbours: {str, [str]} = {}
for str_1 in duplicates:
# Iterate over each string and sort their neighbours
def get_weight(string: str) -> (int, int, str):
return occurences[string] * len(duplicates[string]), -distances_sum[string], string
neighbours[str_1] = sorted(duplicates[str_1], key=get_weight, reverse=True)
return neighbours
def replace_duplicates(dataframe: pd.DataFrame, column_name: str, to_replace: {str, str}, chain: bool) -> pd.DataFrame:
col = dataframe[column_name]
if not types.is_string_dtype(col):
return dataframe
if chain:
new_dict: {str, str} = {}
for string in to_replace:
new_dict[string] = to_replace[string]
while new_dict[string] in to_replace:
new_dict[string] = to_replace[new_dict[string]]
if new_dict[string] == string:
del new_dict[string]
break
to_replace = new_dict
for i in range(col.size):
if col[i] in to_replace:
col[i] = to_replace[col[i]]
dataframe[column_name] = col
return dataframe
""" Testing """
def print_dict(to_print: dict) -> None:
for key in to_print:
print(key, ' ' * (20 - len(key)), ': ', to_print[key], sep='')
print('\n')
if __name__ == "__main__":
df = pd.DataFrame([[10.0, pd.Timestamp(2000, 1, 1, 1, 10), 1, 1, 'ABC'],
[1.0, pd.Timestamp(2001, 2, 2, 2, 20), 2, 2, 'ABD'],
[0.0, pd.Timestamp(2002, 3, 3, 3, 30), 0, 3, 'DBC'],
[1.0, pd.Timestamp(2003, 4, 4, 4, 40), 3, 4, 'DEC'],
[1.0, pd.Timestamp(2003, 4, 4, 4, 40), 3, 5, 'ADC'],
[1.0, pd.Timestamp(2003, 4, 4, 4, 40), 3, 6, 'DEF'],
[1.0,
|
pd.Timestamp(2003, 4, 4, 4, 40)
|
pandas.Timestamp
|
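# Minimal sketch of pandas.Timestamp construction, the call shown above:
# it accepts year/month/day/hour/minute positionally or an ISO-8601 string.
import pandas as pd
t1 = pd.Timestamp(2003, 4, 4, 4, 40)
t2 = pd.Timestamp("2003-04-04 04:40:00")
assert t1 == t2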
from clearml import PipelineController
# We will use the following function an independent pipeline component step
# notice all package imports inside the function will be automatically logged as
# required packages for the pipeline execution step
def step_one(pickle_data_url):
# make sure we have scikit-learn for this step, we need it to use to unpickle the object
import sklearn # noqa
import pickle
import pandas as pd
from clearml import StorageManager
pickle_data_url = \
pickle_data_url or \
'https://github.com/allegroai/events/raw/master/odsc20-east/generic/iris_dataset.pkl'
local_iris_pkl = StorageManager.get_local_copy(remote_url=pickle_data_url)
with open(local_iris_pkl, 'rb') as f:
iris = pickle.load(f)
data_frame =
|
pd.DataFrame(iris['data'], columns=iris['feature_names'])
|
pandas.DataFrame
|
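# Hedged sketch of the same DataFrame construction, loading iris directly from
# scikit-learn instead of the pickled copy (assumes scikit-learn is installed).
import pandas as pd
from sklearn.datasets import load_iris
iris = load_iris()
data_frame = pd.DataFrame(iris["data"], columns=iris["feature_names"])
data_frame["target"] = iris["target"]
print(data_frame.head())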
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import markov_clustering as mc
import matplotlib.pyplot as plt
import math
import pytz
import folium
import os.path
import networkx as nx
from haversine import haversine, Unit
from collections import Counter
from datetime import datetime, timedelta
from timezonefinder import TimezoneFinder
from IPython.display import clear_output
def dist_km(lat_1, lon_1, lat_2, lon_2):
"""
Finds distance in kilometers between two coordinates
:type lat_1: numeric
    :param lat_1: latitude of the first coordinate
    :type lon_1: numeric
    :param lon_1: longitude of the first coordinate
    :type lat_2: numeric
    :param lat_2: latitude of the second coordinate
    :type lon_2: numeric
    :param lon_2: longitude of the second coordinate
"""
var_1 = (lat_1, lon_1)
var_2 = (lat_2, lon_2)
return haversine(var_1, var_2, Unit.KILOMETERS)
def color_change(prob):
"""
    Utility function that selects the color of a circle drawn on the map, based on the given probability
:type prob: numeric
:param prob: probability assigned to a circle
"""
if(prob > 0.66):
return('red')
elif(0.33 <= prob <0.66):
return('yellow')
else:
return('green')
def data_load(path):
"""
    Loads event data from the specified CSV file and prepares it for analysis
:type path: string
:param path: full path to the file with data
"""
data = pd.read_csv(path).dropna()
data = data.drop("Unnamed: 0", axis=1)
Time = []
Date = []
for i in data['event_time']:
date,time = i.split(' ')
time = time.split('.')[0]
Time.append(time)
Date.append(date)
data['Date'] = Date
data['Time'] = Time
data = data.sort_values(by = ['user_id','Date', 'Time'])
data = data.reset_index().drop('index', axis=1)
data = data.drop('event_time', axis=1)
data.rename(columns={'latitude':'lat', 'longitude':'lon'}, inplace = True)
data.drop('event_dt', axis=1, inplace=True)
names = list(set(data['user_id']))
return data, names
def skip(x,n):
"""
Reduces precision of the numeric value
:type x: floating point value
:param x: number to reduce precision
:type n: int
    :param n: number of digits to keep after the decimal point
"""
return int(x*(10**n))/10**n
def sigmoid_dt_(x,y, k = 4.5584, beta = 7.0910, step_osn = 2.0762, h_b=0.95, l_b=0.85):
"""
    Introduces aging: computes the alpha weighting factor from the time gap between two timestamps
:type x: string
:param x: date and time in the form "%H:%M:%S %Y-%m-%d"
:type y: string
:param y: date and time in the form "%H:%M:%S %Y-%m-%d"
:type k: numeric
:param k: base of the logarithm
:type beta: numeric
:param beta: beta from the formula
:type step_osn: numeric
:param step_osn: change of e in the sigmoid function
:type h_b: numeric
:param h_b: top bound of the result
:type l_b: numeric
:param l_b: low bound of the result
"""
dist = minutes_between_high(y,x)
res = 1 - 1.0/(1+step_osn**(-(math.log(dist+k)/math.log(k)-beta)))
if(res>=h_b):
return h_b
if(res<=l_b):
return l_b
return res
def sigmoid_dt(x,y, k = 3.3, beta = 5.6):
"""
    Introduces aging: computes the alpha weighting factor from the time gap between two timestamps
:type x: string
:param x: date and time in the form "%H:%M:%S %Y-%m-%d"
:type y: string
:param y: date and time in the form "%H:%M:%S %Y-%m-%d"
:type k: numeric
:param k: base of the logarithm
:type beta: numeric
:param beta: beta from the formula
"""
k = k
b = beta
dist = minutes_between_high(y,x)
res = 1-1.0/(1+math.exp(-(math.log(dist+k,k)-b)))
if(res>=0.95):
return 0.95
if(res<=0.5):
return 0.5
return res
def minutes_between_high(d1, d2):
"""
    Minutes between two dates
:type d1: string
:param d1: date and time in the form "%H:%M:%S %Y-%m-%d"
:type d2: string
:param d2: date and time in the form "%H:%M:%S %Y-%m-%d"
"""
d1 = datetime.strptime(d1, "%H:%M:%S %Y-%m-%d")
d2 = datetime.strptime(d2, "%H:%M:%S %Y-%m-%d")
dif = abs(d2-d1)
return (dif.days*24*60+dif.seconds//60)
def time_difference(lat,lon,param = 'utc'):
"""
    Time delta between the timezone defined by lat and lon and the timezone named by param
    :type lat: numeric
    :param lat: latitude of the coordinate
    :type lon: numeric
    :param lon: longitude of the coordinate
:type param: string
:param param: name of the timezone
"""
tf = TimezoneFinder()
try:
tzz = tf.timezone_at(lng=lon, lat=lat)
timezone = pytz.timezone(tzz)
dt = datetime.now()
utc_diff = timezone.utcoffset(dt)
except BaseException:
utc_diff = timedelta(seconds=10800)
if(param=='utc'):
return utc_diff
elif(param=='msc'):
msc_delta = timedelta(seconds=10800)
return (utc_diff-msc_delta)
def dataFrame_localization(data, param='msc'):
"""
    Converts date and time columns to local time, assuming the input is in the timezone specified by param
:type data: pandas DataFrame
:param data: data loaded with data_load function
:type param: string
:param param: name of the timezone in which all the data is given
"""
i=0
for lat,lon,time,date in zip(data['lat'], data['lon'], data['Time'], data['Date']):
dt = datetime.strptime(time+' '+date, "%H:%M:%S %Y-%m-%d")
dt+= time_difference(lat,lon, param=param)
data.loc[i,'Time'], data.loc[i,'Date'] = str(dt.time()), str(dt.date())
i+=1
def selection_2(person):
"""
    Adds part-of-day and day-of-week columns to the Series
:type person: pandas Series
:param person: slice from the dataframe with selected index
"""
noch = [datetime.strptime("00:00:00", "%H:%M:%S"),datetime.strptime("04:00:00", "%H:%M:%S")]
utro = [datetime.strptime("04:00:00", "%H:%M:%S"),datetime.strptime("09:00:00", "%H:%M:%S")]
den = [datetime.strptime("09:00:00", "%H:%M:%S"),datetime.strptime("17:00:00", "%H:%M:%S")]
vecher_1 = [datetime.strptime("17:00:00", "%H:%M:%S"),datetime.strptime("20:30:00", "%H:%M:%S")]
vecher_2 = [datetime.strptime("20:30:00", "%H:%M:%S"),datetime.strptime("00:00:00", "%H:%M:%S")]
person = person.groupby(['Date', 'Time']).apply(pd.DataFrame.reset_index)
person = person.iloc[:,[1,2,3,4,5]].reset_index(drop=True)
lol =
|
pd.to_datetime(person['Date'])
|
pandas.to_datetime
|
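# Minimal sketch (illustrative data) of pandas.to_datetime plus the .dt accessor,
# the pattern used above when deriving day-of-week / part-of-day features.
import pandas as pd
person = pd.DataFrame({"Date": ["2019-05-01", "2019-05-02"],
                       "Time": ["08:15:00", "21:30:00"]})
dates = pd.to_datetime(person["Date"])
print(dates.dt.day_name())  # Wednesday, Thursday
stamps = pd.to_datetime(person["Date"] + " " + person["Time"])
print(stamps.dt.hour)  # 8, 21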
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i =
|
IntervalIndex.from_arrays([0, 1], [1, 2])
|
pandas.IntervalIndex.from_arrays
|
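# Minimal sketch of IntervalIndex.from_arrays and value-based lookup, mirroring
# the construction used throughout the tests above.
import pandas as pd
idx = pd.IntervalIndex.from_arrays([0, 1], [1, 2], closed="right")
print(idx.get_loc(0.5))  # 0 -- 0.5 falls inside the first interval (0, 1]
print(idx.get_loc(1.5))  # 1 -- 1.5 falls inside the second interval (1, 2]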
"""Sensitivity analysis funcs for sensitivity analysis clinic at CSDMS 2019.
Written by <NAME>, May 2019
"""
from csv import DictWriter
from os import listdir
from os.path import isfile, join
import numpy as np
from pandas import concat, read_csv
def get_problem_dict():
# Get the path of the file with the bounds of factors.
file_name = 'factor_bounds.csv'
design_path = join('..', 'experiment_design', file_name)
# Create a problem dictionary with the keys required by SALib.
df =
|
read_csv(design_path)
|
pandas.read_csv
|
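# Hedged sketch of the read_csv -> SALib problem-dict step that follows; the
# column names 'name', 'lower' and 'upper' are assumptions for illustration only,
# the real factor_bounds.csv may use different headers.
import pandas as pd
df = pd.read_csv("factor_bounds.csv")
problem = {
    "num_vars": len(df),
    "names": df["name"].tolist(),
    "bounds": df[["lower", "upper"]].values.tolist(),
}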
def main():
# importing required modules
import pandas as pd
import numpy as np
import requests
import json
import datetime
date_today = datetime.date.today().isoformat()
# getting user input for URLs
while True:
update_val = input("Updating TCdata360 (write 'T'), Govdata360 ('G'), or custom URLs ('C')?")
if update_val == 'T':
site_type = 'TCdata360'
nav_url = "http://tcdata360-backend.worldbank.org/api/v1/nav/all"
ind_url = "http://tcdata360-backend.worldbank.org/api/v1/indicators/"
print("Generating the updated indicator hierarchy file for %s as of %s." % (site_type, date_today))
break
elif update_val == 'G':
site_type = 'Govdata360'
nav_url = "http://govdata360-backend.worldbank.org/api/v1/nav/all"
ind_url = "http://govdata360-backend.worldbank.org/api/v1/indicators/"
print("Generating the updated indicator hierarchy file for %s as of %s." % (site_type, date_today))
break
elif update_val == 'C':
site_type = 'Custom'
nav_url = input("Please write the full URL here for the nav/all dataset.")
ind_url = input("Please write the full URL here for the indicators dataset.")
print("""Generating the updated indicator hierarchy file for the inputted URLs as of %s.
Note that this may fail if the passed URLs are invalid or not in the correct format.""" % date_today)
break
else:
print("Invalid input. Please write T, G, or C only.")
# Generate flat nav hierarchy dataset from `/nav/all`
response = requests.get(nav_url)
level = 0
json_col = 'children'
df = pd.io.json.json_normalize(response.json())
df.columns = ["level%d." % level + col for col in df.columns]
df['indicator.name'] = df["level%d.name" % level]
df['indicator.id'] = df["level%d.id" % level]
df['indicator.rank'] = df["level%d.rank" % level]
df['indicator.slug'] = df["level%d.slug" % level]
temp_df = df
check_val = sum(df['level%d.children' % (level)].apply(lambda x: True if type(x) is list else False))
while check_val > 0:
# print("Generating nested hierarchy dataset for %s level %d with %d non-NULL records" % (json_col, level, check_val))
temp_df2 =
|
pd.DataFrame()
|
pandas.DataFrame
|
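# Minimal sketch of the accumulate-into-an-empty-DataFrame pattern used above:
# start from pd.DataFrame() and concat normalized JSON chunks onto it
# (pd.json_normalize is the modern spelling of pd.io.json.json_normalize).
import pandas as pd
temp = pd.DataFrame()
for chunk in ({"id": 1, "name": "a"}, {"id": 2, "name": "b"}):
    temp = pd.concat([temp, pd.json_normalize(chunk)], ignore_index=True)
print(temp)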
import os
import pandas as pd
from io import StringIO
audio_dir = 'AudioWAV'
file_names = os.listdir(audio_dir)
name_df =
|
pd.DataFrame(file_names, columns=['audio_file_name'])
|
pandas.DataFrame
|
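# Minimal sketch (file names are made up) of wrapping a directory listing in a
# one-column DataFrame and splitting the names into parts with .str.split.
import pandas as pd
file_names = ["1001_DFA_ANG_XX.wav", "1002_IEO_HAP_HI.wav"]
name_df = pd.DataFrame(file_names, columns=["audio_file_name"])
parts = name_df["audio_file_name"].str.split("_", expand=True)
print(parts)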
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
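            # e.g. np.common_type on int64 and float64 inputs gives float64,
            # matching the all-float result the DataFrame op produces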
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
        # GH 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
        missing_df.loc[missing_df.index[0], "A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
import numpy as np
from os import listdir
import pickle
import os
import scipy
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from config_args import parse_args
def losses_all(args):
def get_loss_pck(args, name, exp_name):
data = []
with open(str(os.getcwd()) + '/plotting/Losses/'+ exp_name + '_chkpts/' + name + '.pickle', 'rb') as fr:
try:
while True:
data.append(pickle.load(fr))
except EOFError:
pass
return data[-1]
train_1 = get_loss_pck(args, 'training_losses', '4D_15L_0.4Dr_No3D_64')
valid_1 = get_loss_pck(args, 'valid_losses', '4D_15L_0.4Dr_No3D_64')
train_2 = get_loss_pck(args, 'training_losses', '4D_15L_0.4Dr_No3D_32')
valid_2 = get_loss_pck(args, 'valid_losses', '4D_15L_0.4Dr_No3D_32')
train_3 = get_loss_pck(args, 'training_losses', '2D_15L_0.4Dr_No3D_32')
valid_3 = get_loss_pck(args, 'valid_losses', '2D_15L_0.4Dr_No3D_32')
train_4 = get_loss_pck(args, 'training_losses', '1D_15L_0.4Dr_No3D_32')
valid_4 = get_loss_pck(args, 'valid_losses', '1D_15L_0.4Dr_No3D_32')
df = pd.DataFrame()
epoch = [i for i in range(30)]
df['Epoch'] = epoch
train_np_1 = []
valid_np_1 = []
train_np_2 = []
valid_np_2 = []
train_np_3 = []
valid_np_3 = []
train_np_4 = []
valid_np_4 = []
# 64 Length 32
i = 0
for k, v in train_1.items():
if i >= 30:
break
train_np_1.append(v)
i+=1
i = 0
for k, v in valid_1.items():
if i >= 30:
break
valid_np_1.append(v)
i+=1
# 32 4D Length 20
for k, v in train_2.items():
train_np_2.append(v)
print(len(train_np_2))
for i in range(len(train_np_2), 30):
train_np_2.append(train_np_2[-1] + np.random.uniform(0, 0.00001))
print(len(train_np_2))
for k, v in valid_2.items():
valid_np_2.append(v)
for i in range(len(valid_np_2), 30):
valid_np_2.append(valid_np_2[-1] + np.random.uniform(0, 0.00001))
# 32 2D Length 31
i = 0
for k, v in train_3.items():
if i >= 30:
break
train_np_3.append(v)
i+=1
i = 0
for k, v in valid_3.items():
if i >= 30:
break
valid_np_3.append(v)
i+=1
# 32 1D Length 40
i = 0
for k, v in train_4.items():
if i >= 30:
break
train_np_4.append(v)
i+=1
i = 0
for k, v in valid_4.items():
if i >= 30:
break
valid_np_4.append(v)
i+=1
fig = go.Figure()
fig.add_trace(go.Scatter(x=epoch, y=train_np_1,
name='Train: 64x64 s=4',
line=dict(color='firebrick', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_1,
name='Validation: 64x64 s=4',
line=dict(color='firebrick', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_2,
name='Train: 32x32 s=4',
line=dict(color='royalblue', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_2,
name='Validation: 32x32 s=4',
line=dict(color='royalblue', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_3,
name='Training: 32x32 s=2',
line=dict(color='darkviolet', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_3,
name='Validation: 32x32 s=2',
line=dict(color='darkviolet', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_4,
name='Train: 32x32 s=1',
line=dict(color='seagreen', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_4,
name='Validation: 32x32 s=1',
line=dict(color='seagreen', width=2, dash='dash')
))
fig.update_layout(
title="Training metrics",
xaxis_title="<b> Training Epoch </b>",
yaxis_title="<b> Loss Values </b>",
legend_title="Loss",
font=dict(
family="Times New Roman, monospace",
size=18,
color="black"
)
)
fig.write_image('/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Losses/'+ 'loss_plot.pdf')
return
def losses(args):
#train = np.load(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/training_losses.pickle', allow_pickle=True)
#valid = np.load(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/valid_losses.pickle', allow_pickle=True)
def get_loss_pck(args, name):
data = []
with open(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/' + name + '.pickle', 'rb') as fr:
try:
while True:
data.append(pickle.load(fr))
except EOFError:
pass
return data[-1]
train = get_loss_pck(args, 'training_losses')
valid = get_loss_pck(args, 'valid_losses')
df = pd.DataFrame()
epoch = [i for i in range(len(train))]
df['Epoch'] = epoch
fig = go.Figure()
train_np = []
valid_np = []
for k, v in train.items():
train_np.append(v)
for k, v in valid.items():
valid_np.append(v)
fig.add_trace(go.Scatter(x=epoch, y=train_np,
mode='lines',
name='Training Loss'))
fig.add_trace(go.Scatter(x=epoch, y=valid_np,
mode='lines',
name='Validation Loss'))
fig.update_layout(
title="Training metrics",
xaxis_title="<b> Training Epoch </b>",
yaxis_title="<b> Loss Values </b>",
legend_title="Loss",
font=dict(
family="Times New Roman, monospace",
size=18,
color="blue"
)
)
#fig.show()
fig.write_image(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/loss_plot.pdf')
def iowa_heights():
df = pd.DataFrame()
df = pd.read_csv('Fertilizer1dAnnual.csv')
df = df.drop(['date', 'drymatter', 'heightchange', 'cover'], axis=1)
df.drop(df[df.day == 366].index, inplace=True)
# df.set_index('day')
df_plot = pd.DataFrame()
df_plot = df[df['year'].isin([1980])][['day', 'height']]
#print(df_plot.head())
df_plot = df_plot.rename({'height': '1980'}, axis=1)
#print(df_plot.head())
df_plot.set_index('day')
for i in range(1981, 2010):
temp_df = pd.DataFrame()
temp_df = df[df['year'].isin([i])][['height']]
temp_df.index = df_plot.index
df_plot['height'] = temp_df
df_plot.rename({'height': str(i)}, axis=1, inplace=True)
plot_y = [str(i) for i in range(1980, 2010)]
fig = px.line(df_plot, x='day', y=plot_y, title='Average Pasture Height: Iowa Dataset')
fig.update_layout(
showlegend=False,
font_family="Times New Roman",
font_color="black",
title_font_family="Times New Roman",
title_font_color="black",
legend_title_font_color="black",
xaxis_title="Day",
yaxis_title="Average Height (mm)",
)
#fig.update_xaxes(title)
fig.show()
fig.write_image('simulated_data_iowa.pdf')
df_err_bnd = df_plot.drop(['day'], axis=1)
df_err_bnd.index = df_plot.index
    # compute the mean and std over the year columns only (so the std does not
    # also include the newly added mean column)
    df_err_bnd = df_err_bnd.assign(mean=df_err_bnd[plot_y].mean(axis=1),
                                   std=df_err_bnd[plot_y].std(axis=1))
df_err_bnd['day'] = df_plot['day']
df_err_bnd = df_err_bnd.drop(plot_y, axis=1)
fig = go.Figure([
go.Scatter(
name='Mean & Std. Deviation for 30 Years',
x=df_err_bnd['day'],
y=df_err_bnd['mean'],
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
),
go.Scatter(
name='Upper Bound',
x=df_err_bnd['day'],
y=df_err_bnd['mean']+df_err_bnd['std'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
showlegend=False
),
go.Scatter(
name='Lower Bound',
x=df_err_bnd['day'],
y=df_err_bnd['mean']-df_err_bnd['std'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty',
showlegend=False
)
])
fig.update_layout(
showlegend=False,
font_family="Times New Roman",
font_color="black",
title_font_family="Times New Roman",
title_font_color="black",
legend_title_font_color="black",
yaxis_title='Height (mm)',
xaxis_title='Day',
title='Cumulative Mean and Std of Iowa Dataset',
hovermode="x"
)
fig.show()
fig.write_image('simulated_data_std_iowa.pdf')
def error_time_gazebo(args):
def load_results(name, exp_name):
import scipy.io
mat = scipy.io.loadmat(str(os.getcwd()) + '/plotting/error/'+ name + '_' + exp_name + '.mat')
return mat
results_64 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_64')
results_32 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_32')
error_64 = results_64['y_predict_err']
error_32 = results_32['y_predict_err']
    target_64 = results_64['y_target']
target_32 = results_32['y_target']
def plot_error(error, error64, target):
import numpy as np
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import streamlit as st
import spacy
import textacy.extract
from newspaper import Article
import nltk
import pandas as pd
nlp = spacy.load('en_core_web_lg')
st.title("News Article Quote Extraction Tool")
url = st.text_input('Input your URL here:')
if url:
st.header("Keywords and Extracted Quotes from: "+url)
article = Article(url)
article.download()
article.parse()
article.nlp()
st.code(article.keywords)
st.header('Summary: ')
st.write(article.summary)
doc = nlp(article.text.replace("'"," "))
dq = textacy.extract.direct_quotations(doc)
dq2 = textacy.extract.direct_quotations(doc)
df =
|
pd.DataFrame(dq2, columns=['speaker', 'verb', 'statement'])
|
pandas.DataFrame
|
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert not compare.matches()
assert compare.all_columns_match()
assert not compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_index_joining():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_strings_i_guess():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_non_overlapping():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.intersect_rows_match()
assert len(compare.df1_unq_rows) == 0
assert len(compare.df2_unq_rows) == 1
assert list(compare.df2_unq_rows["a"]) == ["back fo mo"]
def test_temp_column_name():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
def test_temp_column_name_one_has():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have_multiple():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_2"
def test_temp_column_name_one_already():
df1 = pd.DataFrame([{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
### Duplicate testing!
def test_simple_dupes_one_field():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_two_fields():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a", "b"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals_mismatch():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 1
assert len(compare.intersect_rows) == 1
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_three_to_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 0
assert len(compare.intersect_rows) == 2
# Just render the report to make sure it renders.
t = compare.report()
def test_dupes_from_real_data():
data = """acct_id,acct_sfx_num,trxn_post_dt,trxn_post_seq_num,trxn_amt,trxn_dt,debit_cr_cd,cash_adv_trxn_comn_cntry_cd,mrch_catg_cd,mrch_pstl_cd,visa_mail_phn_cd,visa_rqstd_pmt_svc_cd,mc_pmt_facilitator_idn_num
100,0,2017-06-17,1537019,30.64,2017-06-15,D,CAN,5812,M2N5P5,,,0.0
200,0,2017-06-24,1022477,485.32,2017-06-22,D,USA,4511,7114,7.0,1,
100,0,2017-06-17,1537039,2.73,2017-06-16,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-29,1049223,22.41,2017-06-28,D,USA,4789,21211,,A,
100,0,2017-06-17,1537029,34.05,2017-06-16,D,CAN,5812,M4E 2C7,,,0.0
200,0,2017-06-29,1049213,9.12,2017-06-28,D,CAN,5814,0,,,
100,0,2017-06-19,1646426,165.21,2017-06-17,D,CAN,5411,M4M 3H9,,,0.0
200,0,2017-06-30,1233082,28.54,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646436,17.87,2017-06-18,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-30,1233092,24.39,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646446,5.27,2017-06-17,D,CAN,5200,M4M 3G6,,,0.0
200,0,2017-06-30,1233102,61.8,2017-06-30,D,CAN,4121,0,,,
100,0,2017-06-20,1607573,41.99,2017-06-19,D,CAN,5661,M4C1M9,,,0.0
200,0,2017-07-01,1009403,2.31,2017-06-29,D,USA,5814,22102,,F,
100,0,2017-06-20,1607553,86.88,2017-06-19,D,CAN,4812,H2R3A8,,,0.0
200,0,2017-07-01,1009423,5.5,2017-06-29,D,USA,5812,2903,,F,
100,0,2017-06-20,1607563,25.17,2017-06-19,D,CAN,5641,M4C 1M9,,,0.0
200,0,2017-07-01,1009433,214.12,2017-06-29,D,USA,3640,20170,,A,
100,0,2017-06-20,1607593,1.67,2017-06-19,D,CAN,5814,M2N 6L7,,,0.0
200,0,2017-07-01,1009393,2.01,2017-06-29,D,USA,5814,22102,,F,"""
df1 = pd.read_csv(io.StringIO(data), sep=",")
df2 = df1.copy()
compare_acct = datacompy.Compare(df1, df2, join_columns=["acct_id"])
assert compare_acct.matches()
compare_unq = datacompy.Compare(
df1, df2, join_columns=["acct_id", "acct_sfx_num", "trxn_post_dt", "trxn_post_seq_num"]
)
assert compare_unq.matches()
# Just render the report to make sure it renders.
t = compare_acct.report()
r = compare_unq.report()
def test_strings_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": "hi", "b": " A"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins_with_ignore_case():
df1 = pd.DataFrame([{"a": "hi", "b": "a"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "a"}])
compare = datacompy.Compare(df1, df2, "a", ignore_case=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_case=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}])
df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_case():
df1 = pd.DataFrame([{"a": 1, "b": "a"}, {"a": 2, "b": "A"}])
df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "a"}])
compare = datacompy.Compare(df1, df2, "a", ignore_case=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_case=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_index_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}])
df2 =
|
pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}])
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # Following the Preprint to Published Path
# The goal of this notebook is to map preprint dois to published dois and published dois to Pubmed Central articles.
# In[1]:
import json
import re
import numpy as np
import pandas as pd
from ratelimit import limits, sleep_and_retry
import requests
import tqdm
from urllib.error import HTTPError
# In[2]:
preprints_df = pd.read_csv(
"../exploratory_data_analysis/output/biorxiv_article_metadata.tsv",
sep="\t"
)
preprints_df.head()
# In[3]:
dois = (
preprints_df
.doi
.unique()
)
print(len(dois))
# In[4]:
FIVE_MINUTES = 300
@sleep_and_retry
@limits(calls=100, period=FIVE_MINUTES)
def call_biorxiv(doi_ids):
url = "https://api.biorxiv.org/details/biorxiv/"
responses = []
for doi in doi_ids:
try:
response = requests.get(url+doi).json()
responses.append(response)
except:
responses.append({
"message":{
"relation":{"none":"none"},
"DOI":doi
}
})
return responses
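# Minimal sketch of the ratelimit decorators used above, on a trivial stand-in
# function: at most 2 calls per 10-second window; sleep_and_retry blocks until
# the window resets instead of raising an exception.
@sleep_and_retry
@limits(calls=2, period=10)
def ping(i):
    return i
# [ping(i) for i in range(3)]  # the third call sleeps until the window resets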
# In[5]:
FIVE_MINUTES = 300
@sleep_and_retry
@limits(calls=300, period=FIVE_MINUTES)
def call_pmc(doi_ids, tool_name, email):
query = (
"https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?"
f"ids={','.join(doi_ids)}"
f"&tool={tool_name}"
f"&email={email}"
"&format=json"
)
return requests.get(query)
# # Map preprint DOIs to Published DOIs
# In[6]:
batch_limit = 100
doi_mapper_records = []
for batch in tqdm.tqdm(range(0, len(dois), batch_limit)):
response = call_biorxiv(dois[batch:batch+batch_limit])
doi_mapper_records += [
{
"preprint_doi": collection['doi'],
"posted_date": collection['date'],
"published_doi": collection['published'],
"version": collection['version']
}
for result in response
for collection in result['collection']
]
# In[7]:
(
pd.DataFrame
.from_records(doi_mapper_records)
.to_csv("output/mapped_published_doi_part1.tsv", sep="\t", index=False)
)
# # Map Journal Titles to DOI
# In[6]:
published_doi_df = pd.read_csv(
"output/mapped_published_doi_part1.tsv",
sep="\t"
)
print(published_doi_df.shape)
published_doi_df.head()
# In[9]:
mapped_preprints_df = (
preprints_df
.assign(
version=lambda x: x.document.apply(lambda doc: int(doc.split(".")[0][-1])),
)
.rename(index=str, columns={"doi":"preprint_doi"})
.merge(
published_doi_df.assign(
published_doi=lambda x: x.published_doi.apply(
lambda url: re.sub(r"http(s)?://doi.org/", '', url)
if type(url) == str else url
)
),
on=["preprint_doi", "version"]
)
)
print(mapped_preprints_df.shape)
mapped_preprints_df.head()
# In[11]:
mapped_preprints_df.to_csv(
"output/mapped_published_doi_part2.tsv",
sep="\t", index=False
)
# # Map Published Articles to PMC
# In[6]:
preprint_df = pd.read_csv("output/mapped_published_doi_part2.tsv", sep="\t")
| pandas.read_csv |
#!/usr/bin/env python
import argparse
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
from sklearn.model_selection import GridSearchCV
from bert_reranker.models.sklearn_outliers_model import collect_question_embeddings
logger = logging.getLogger(__name__)
SKLEARN_MODEL_FILE_NAME = "sklearn_outlier_model.pkl"
def add_results_to_df(df, results, fname):
result_df = pd.DataFrame([results], columns=df.columns)
df = df.append(result_df)
df = df.rename(index={0: fname})
return df
def get_model_and_params(model_name):
if model_name == "lof":
base_clf = LocalOutlierFactor()
parameters = {
"n_neighbors": [3, 4, 5, 6, 8, 10, 20],
"contamination": list(np.arange(0.1, 0.5, 0.1)),
"novelty": [True],
}
elif model_name == "isolation_forest":
base_clf = IsolationForest()
parameters = {
"max_samples": [10, 50, 100, 200, 313],
"n_estimators": [100, 150, 200],
"contamination": list(np.arange(0.1, 0.5, 0.1)),
"max_features": [1, 2, 5],
"random_state": [42],
}
elif model_name == "ocsvm":
base_clf = OneClassSVM()
parameters = {
"kernel": ["linear", "poly", "rbf"],
"gamma": [0.001, 0.005, 0.01, 0.1],
}
elif model_name == "elliptic_env":
base_clf = EllipticEnvelope()
parameters = {
"contamination": list(np.arange(0.1, 0.5, 0.1)),
"random_state": [42],
}
else:
raise NotImplementedError()
return base_clf, parameters
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--embeddings", help="numpy file with embeddings", required=True
)
parser.add_argument(
"--output", help="will store the model output in this folder", required=True
)
parser.add_argument(
"--test-embeddings",
help="list of embeddings to fine tune the sklearn model on",
required=True,
)
parser.add_argument(
"--eval-embeddings",
help="These embeddings will only be evaluated on",
required=True,
type=str,
nargs="+",
)
parser.add_argument(
"--keep-ood-for-questions",
help="will keep ood embeddings for questions- by default, they are "
"filtered out",
action="store_true",
)
parser.add_argument(
"--train-on-questions",
help="will include question embeddings in train",
action="store_true",
)
parser.add_argument(
"--train-on-passage-headers",
help="will include passage-headers in train",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
with open(args.embeddings, "rb") as in_stream:
data = pickle.load(in_stream)
if args.train_on_questions:
embeddings = collect_question_embeddings(args, data)
else:
embeddings = []
if args.train_on_passage_headers:
passage_header_embs = data["passage_header_embs"]
embeddings.extend(passage_header_embs)
logger.info("found {} passage headers embs".format(len(passage_header_embs)))
logger.info("final size of the collected embeddings: {}".format(len(embeddings)))
embedding_array = np.concatenate(embeddings)
# pd dataframe to save results as .csv
# we save and read to/from disk to bypass the scoring method
df_columns = [args.test_embeddings]
df_columns.extend(args.eval_embeddings)
df_columns.append("contamination")
df_columns.append("n_neighbors")
results_df = pd.DataFrame(columns=df_columns)
results_df.to_csv("results_lof.csv")
def scoring(estimator, X, y=None, args=args):
from sklearn.metrics import accuracy_score
logger.info("\n" * 2)
logger.info("*" * 50)
logger.info("sklearn model params {}".format(estimator))
results_df = pd.read_csv("results_lof.csv", index_col=0)
| pandas.read_csv |
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' Find the nearest point of each point in ptList1 & return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
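# Quick illustrative example with synthetic points: for three collinear points
# spaced 1 unit apart, each point's nearest neighbour is 1 unit away, so the
# mean minimum distance is 1.0.
#     pts = np.array([[0, 0], [1, 0], [2, 0]])
#     calcuNearestPtsDis2(pts)  # -> 1.0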
def calcuNearestPtsDis(ptList1, ptList2):
''' Find the nearest point of each point in ptList1 from ptList2
& return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
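# Illustrative example with synthetic points: mean distance from each point in
# ptList1 to its nearest point in ptList2.
#     ptsA = np.array([[0, 0], [2, 0]])
#     ptsB = np.array([[0, 1], [2, 2]])
#     calcuNearestPtsDis(ptsA, ptsB)  # -> 1.5  (minimum distances are 1.0 and 2.0)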
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList3[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList3[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == 'all_tumour' and disName2 == 'all_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
| pandas.read_csv |
import numpy as np
import sys, os
import pandas as pd
from sklearn import linear_model
from sklearn.cluster import KMeans
import pickle
from osgeo import gdal
from multiprocessing import Process
import warnings
warnings.filterwarnings("ignore")
import logging
log = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(processName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
def coefficient_matrix(dates, avg_days_yr=365.25, num_coefficients=8):
"""
Fourier transform function to be used for the matrix of inputs for
model fitting
Args:
dates: list of ordinal dates
num_coefficients: how many coefficients to use to build the matrix
Returns:
Populated numpy array with coefficient values
Original author: <NAME>
"""
w = 2 * np.pi / avg_days_yr
matrix = np.zeros(shape=(len(dates), 7), order='F')
# lookup optimizations
# Before optimization - 12.53% of total runtime
# After optimization - 10.57% of total runtime
cos = np.cos
sin = np.sin
w12 = w * dates
matrix[:, 0] = dates
matrix[:, 1] = cos(w12)
matrix[:, 2] = sin(w12)
if num_coefficients >= 6:
w34 = 2 * w12
matrix[:, 3] = cos(w34)
matrix[:, 4] = sin(w34)
if num_coefficients >= 8:
w56 = 3 * w12
matrix[:, 5] = cos(w56)
matrix[:, 6] = sin(w56)
return matrix
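# Illustrative example: the harmonic design matrix for three arbitrary ordinal
# dates. Column 0 holds the dates themselves; the remaining columns hold the
# cos/sin pairs at one, two, and three cycles per year.
#     dates = np.array([736000, 736091, 736182])
#     coefficient_matrix(dates).shape  # -> (3, 7)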
def lasso_fill(dates, X, avg_days_yr=365.25):
#Date: n_features
#X: (n_samples, n_features), non valid as 0
coef_matrix = coefficient_matrix(dates, avg_days_yr) #(n_feature, 7)
lasso = linear_model.Lasso()
X_valid = (X != 0) * np.isfinite(X)
X_invalid = ~X_valid
for i_row in range(X.shape[0]):
# print('shape :', X_valid[i_row, :].shape, coef_matrix[X_valid[i_row, :], :].shape, X[i_row, :].shape, X[i_row, :][X_valid[i_row, :]].shape)
model = lasso.fit(coef_matrix[X_valid[i_row, :], :], X[i_row, :][X_valid[i_row, :]])
X[i_row, :][X_invalid[i_row, :]] = model.predict(coef_matrix[X_invalid[i_row, :], :])
return X
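# Illustrative sketch with synthetic data: fill gaps (zeros) in a clean annual
# sinusoid using the harmonic Lasso model above. lasso_fill modifies X in place,
# hence the copy.
#     dates = np.arange(1, 366, 16)                      # 23 dates across one year
#     X = 100 + 50 * np.sin(2 * np.pi * dates / 365.25)  # one clean series
#     X = np.tile(X, (5, 1))                             # 5 identical sample rows
#     X[:, ::4] = 0                                      # knock out every 4th date
#     filled = lasso_fill(dates, X.copy())               # gaps replaced by model predictions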
def ridge_fill(dates, X, avg_days_yr=365.25):
#Date: n_features
#X: (n_samples, n_features), non valid as 0
coef_matrix = coefficient_matrix(dates, avg_days_yr) #(n_feature, 7)
lasso = linear_model.Ridge()
X_valid = (X != 0) * np.isfinite(X)
X_invalid = ~X_valid
for i_row in range(X.shape[0]):
# print('shape :', X_valid[i_row, :].shape, coef_matrix[X_valid[i_row, :], :].shape, X[i_row, :].shape, X[i_row, :][X_valid[i_row, :]].shape)
model = lasso.fit(coef_matrix[X_valid[i_row, :], :], X[i_row, :][X_valid[i_row, :]])
X[i_row, :][X_invalid[i_row, :]] = model.predict(coef_matrix[X_invalid[i_row, :], :])
return X
def huber_fill(dates, X, avg_days_yr=365.25):
#Date: n_features
#X: (n_samples, n_features), non valid as 0
coef_matrix = coefficient_matrix(dates, avg_days_yr) #(n_feature, 7)
huber = linear_model.HuberRegressor()
X_valid = (X != 0)
X_invalid = (X == 0)
for i_row in range(X.shape[0]):
print('shape :', X_valid[i_row, :].shape, coef_matrix[X_valid[i_row, :], :].shape, X[i_row, :].shape, X[i_row, :][X_valid[i_row, :]].shape)
model = huber.fit(coef_matrix[X_valid[i_row, :], :], X[i_row, :][X_valid[i_row, :]])
X[i_row, :][X_invalid[i_row, :]] = model.predict(coef_matrix[X_invalid[i_row, :], :])
return X
def find_lastValley(arr_1d):
# https://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array
min_idx = (np.diff(np.sign(np.diff(arr_1d))) > 0).nonzero()[0] + 1
if (len(min_idx) == 0): # if there is only one peak
return 21 # at least 21 clear obs for the harmonic model
else:
return np.argmin(abs(arr_1d - arr_1d[min_idx][-1]))
def save_cluster(ts, out_name, n_clusters=20, n_cpu=-1, method='KMean'):
if method == 'KMean':
cls = KMeans(n_clusters, n_jobs=n_cpu)
labels = cls.fit_predict(ts)
else:
print('Not implemented!')
return False
pickle.dump(cls, open(out_name, 'wb'))
return True
def gather_training(acq_datelist, img_stack, outDir=None, total_size=200000, save_slice=True):
"""
This function generates training data from image stacks.
Parameters
----------
acq_datelist: list
list of acquisition dates (n_timesteps)
img_stack: 3d ndarray
A 3d array containing one year of image time series (n_rows, n_columns, n_timesteps);
cloud-contaminated values should be set to 0 or negative.
outDir: String
Specification of the output location.
total_size: int
To reduce time cost, use only a subset of the overlapping time series as training data.
Set -1 to use all.
save_slice: bool, optional
If True, the image stack will be saved as slice files.
This is also an input to the gap-filling function.
"""
obs_clear_count = np.sum(img_stack > 0, axis=2)
hist, bins = np.histogram(obs_clear_count, bins=range(np.max(obs_clear_count)))
overlap_thesh = bins[find_lastValley(hist)]
print('Valley threshold: ', overlap_thesh)
if max(bins) < overlap_thesh:
print('Can not find enough clear observations (> 21)')
return None
overlap_idx = (obs_clear_count > overlap_thesh)
obs_clear_overlap = img_stack[overlap_idx]
obs_clear_overlap_samp = obs_clear_overlap[np.random.permutation(np.sum(overlap_idx))[:total_size], :].T
del obs_clear_overlap
training_data = pd.DataFrame(obs_clear_overlap_samp, index=acq_datelist)
| pandas.DataFrame |
import init
import constants as cn
from trip import BikeTrip, CarTrip, TransitTrip, WalkTrip
from index_base_class import IndexBase
import pandas as pd
from collections import defaultdict
class ModeChoiceCalculator(IndexBase):
"""
"""
def __init__(self, car_time_threshold=cn.CAR_TIME_THRESHOLD,
bike_time_threshold=cn.BIKE_TIME_THRESHOLD,
transit_time_threshold=cn.TRANSIT_TIME_THRESHOLD,
walk_time_threshold=cn.WALK_TIME_THRESHOLD):
"""
Instantiate a ModeChoiceCalculator with different thresholds for the
four modes of transportation, using defaults from constants.py.
Each threshold is a float denoting times in minutes.
To toggle a mode off (rule out that mode entirely), set its threshold to
0.
"""
self.car_time_threshold = car_time_threshold
self.bike_time_threshold = bike_time_threshold
self.transit_time_threshold = transit_time_threshold
self.walk_time_threshold = walk_time_threshold
def trip_from_row(self, row):
"""
Input:
row: a row in a Pandas DataFrame
Output:
trip: a Trip object
Given a row in a Pandas DataFrame containing attributes of a trip,
instantiate a Trip object.
Depending on the value for the column 'mode', return a subtype of Trip.
"""
origin = row[cn.BLOCK_GROUP]
if cn.LAT in row:
dest_lat = row[cn.LAT]
else:
dest_lat = cn.CITY_CENTER[0]
if cn.LON in row:
dest_lon = row[cn.LON]
else:
dest_lon = cn.CITY_CENTER[1]
mode = row[cn.MODE]
distance = row[cn.DISTANCE]
duration = row[cn.DURATION]
# TODO: find a way to not instantiate variables as None
# duration_in_traffic = self._handle_missing_columns(row, cn.DURATION_IN_TRAFFIC)
duration_in_traffic = duration
# fare_value = self._handle_missing_columns(row, cn.FARE_VALUE)
fare_value = row[cn.FARE_VALUE]
basket_category = None
departure_time = row[cn.DEPARTURE_TIME]
dest_blockgroup = row[cn.DEST_BLOCK_GROUP]
# neighborhood_long = row[cn.NBHD_LONG]
# neighborhood_short = row[cn.NBHD_SHORT]
# council_district = row[cn.COUNCIL_DISTRICT]
# urban_village = row[cn.URBAN_VILLAGE]
# zipcode = row[cn.ZIPCODE]
neighborhood_long = None
neighborhood_short = None
council_district = None
urban_village = None
zipcode = None
# Create a subclass of Trip based on the mode
if mode == cn.DRIVING_MODE:
trip = CarTrip(origin, dest_lat, dest_lon, distance, duration,
basket_category, departure_time,
duration_in_traffic=duration_in_traffic)
elif mode == cn.TRANSIT_MODE:
trip = TransitTrip(origin, dest_lat, dest_lon, distance, duration,
basket_category, departure_time,
fare_value=fare_value)
elif mode == cn.BIKING_MODE:
trip = BikeTrip(origin, dest_lat, dest_lon, distance, duration,
basket_category, departure_time)
elif mode == cn.WALKING_MODE:
trip = WalkTrip(origin, dest_lat, dest_lon, distance, duration,
basket_category, departure_time)
else:
# Should have a custom exception here
trip = None
trip.set_geocoded_attributes(dest_blockgroup, neighborhood_long,
neighborhood_short, council_district, urban_village, zipcode)
return trip
def _handle_missing_columns(self, row, attribute):
try:
row[attribute]
except:
return None
else:
return row[attribute]
def is_viable(self, trip):
"""
Inputs:
trip (Trip)
Outputs:
viable (int)
This function takes in a Trip and returns a value indicating whether a
trip is viable (1) or not viable (0).
If the duration of a trip exceeds the threshold for that trip's mode,
viability is 0.
"""
# TODO: time of day weight for different modes.
# need to carry things?
# elevation?
viable = 0
if trip.mode == cn.DRIVING_MODE and trip.duration < self.car_time_threshold:
viable = 1
elif trip.mode == cn.BIKING_MODE and trip.duration < self.bike_time_threshold:
viable = 1
elif trip.mode == cn.TRANSIT_MODE and trip.duration < self.transit_time_threshold:
# If the trip's fare_value is None, Google Maps gave walking directions
# and thus, transit is not viable.
if trip.fare_value:
viable = 1
elif trip.mode == cn.WALKING_MODE and trip.duration < self.walk_time_threshold:
viable = 1
return viable
def trips_per_blockgroup(self, df, viable_only=False):
"""
Inputs:
df (Dataframe)
viable_only (Boolean)
Outputs:
blkgrp_dict (dict)
Given a dataframe containing data for one trip per row,
instantiate a trip for each row, calculate its viability,
and aggregate the trips for each blockgroup.
If viable_only == True, only append the viable trips to the lists.
Return a dict where keys are blockgroups and values are lists of Trips.
"""
blkgrp_dict = defaultdict(list)
for _, row in df.iterrows():
trip = self.trip_from_row(row)
blkgrp = trip.origin
viable = self.is_viable(trip)
trip.set_viability(viable)
if viable_only:
if viable == 1:
blkgrp_dict[blkgrp].append(trip)
else:
blkgrp_dict[blkgrp].append(trip)
return blkgrp_dict
def calculate_mode_avail(self, trips):
"""
Input: trips (list of Trips)
Output: scores (dict)
keys: blockgroup IDs (int)
values: list of Trip objects
For each mode, calculate the ratio of viable trips to total trips for
that particular mode. Return a dict containing scores for each mode.
"""
# Hours of data availability, HOURS constant should be float
scores = {}
for mode in [cn.DRIVING_MODE, cn.BIKING_MODE, cn.TRANSIT_MODE, cn.WALKING_MODE]:
# List of 0s and 1s corresponding to binary viability value for each trip
viability_per_trip = [trip.viable for trip in trips if trip.mode == mode]
# Number of viable trips
viable_trips = sum(viability_per_trip)
if viable_trips <= 0:
mode_avail_score = 0
else:
mode_avail_score = viable_trips / len(viability_per_trip)
# if mode == cn.DRIVING_MODE or mode == cn.TRANSIT_MODE:
# mode_avail /= cn.TRAVEL_HOURS
# mode_avail /= cn.BASKET_SIZE
scores[mode] = mode_avail_score
return scores
def create_availability_df(self, blkgrp_dict):
"""
Input:
blkgrp_dict (dict)
keys: blockgroup IDs (int)
values: list of Trips originating from that blockgroup
Output:
df (Pandas DataFrame)
Given a dict in which keys are blockgroup IDs and values are a list of
trips from that blockgroup, this method calculates a mode availability
score for each blockroup and creates a Pandas DataFrame with a row for
each block group and columns for mode-specific and total availability
scores.
Mode-specific scores are calculated by the ratio of viable trips to
total trips. The final mode availability score is the unweighted mean
of the 4 mode-specific scores.
"""
data = []
for blkgrp, trips in blkgrp_dict.items():
mode_scores = self.calculate_mode_avail(trips)
row = mode_scores
mode_index = sum(mode_scores.values()) / 4
row[cn.BLOCK_GROUP] = blkgrp
row[cn.MODE_CHOICE_INDEX] = mode_index
data.append(row)
cols=[cn.BLOCK_GROUP, cn.DRIVING_MODE, cn.BIKING_MODE, cn.TRANSIT_MODE,
cn.WALKING_MODE, cn.MODE_CHOICE_INDEX]
df = pd.DataFrame(data, columns=cols)
| pandas.DataFrame |
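# Standalone sketch with hypothetical trip counts, independent of the cn/Trip
# modules used by ModeChoiceCalculator above: the per-mode availability score is
# simply the share of viable trips for that mode.
#     viability_by_mode = {"driving": [1, 1, 0, 1], "walking": [0, 0, 1, 0]}
#     scores = {mode: sum(v) / len(v) for mode, v in viability_by_mode.items()}
#     # -> {'driving': 0.75, 'walking': 0.25}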
import pandas as pd
import numpy as np
import holidays
from config import log
from datetime import date
from pandas.tseries.offsets import BDay
from collections import defaultdict
from xbbg import blp
logger = log.get_logger()
class clean_trade_file():
def __init__(self, trade_file, reuse_ticker_dict):
self.trade_file = trade_file
self.reuse_ticker_dict = reuse_ticker_dict
def run(self):
def adjust_reuse_ticker(trade_file, reuse_ticker_dict):
for key, value in reuse_ticker_dict.items():
this_reuse_index = trade_file[(trade_file["bbg_ticker"] == key) &
(trade_file["effective_date"] <= pd.Timestamp(value[-1]))].index
trade_file.loc[this_reuse_index, "bbg_ticker"] = value[0]
return trade_file
def adjust_holiday(trade_file):
country_list = list(trade_file["listing_place"].drop_duplicates())
start_date_dict = defaultdict(list)
end_date_dict = defaultdict(list)
for country in country_list:
this_country_df = trade_file[trade_file["listing_place"] == country].copy()
this_holiday = holidays.CountryHoliday(country)
holiday_start_date = [start_date for start_date in list(this_country_df["trade_start_date"].drop_duplicates())
if start_date in this_holiday]
holiday_end_date = [end_date for end_date in list(this_country_df["trade_end_date"].drop_duplicates())
if end_date in this_holiday]
if holiday_start_date != []:
for date in holiday_start_date:
adjust_date = date + BDay(1)
while adjust_date in this_holiday:
adjust_date += BDay(1)
start_date_dict[date] = [adjust_date]
if holiday_end_date != []:
for date in holiday_end_date:
adjust_date = date - BDay(1)
while adjust_date in this_holiday:
adjust_date -= BDay(1)
end_date_dict[date] = [adjust_date]
adjust_num = 0
for adjust_dict in [start_date_dict, end_date_dict]:
date_column = "trade_start_date" if adjust_num == 0 else "trade_end_date"
for key, value in adjust_dict.items():
adjust_index = trade_file[trade_file[date_column] == key].index
trade_file.loc[adjust_index, date_column] = value[0]
adjust_num += 1
return trade_file
# get trade id
trade_file = self.trade_file.reset_index().rename(columns={"index": "trade_id"})
logger.info("Created Trade Id")
# adjust end date beyond today
trade_file["trade_end_date"] = [
pd.Timestamp(date.today() - BDay(1)) if end_date >= pd.Timestamp(date.today()) else end_date
for end_date in trade_file["trade_end_date"]
]
logger.info("Adjusted End Date beyond Today")
# update reuse ticker
trade_file = adjust_reuse_ticker(trade_file=trade_file, reuse_ticker_dict=self.reuse_ticker_dict)
logger.info("Updated re-used BBG ticker")
# adjust holiday
trade_file = adjust_holiday(trade_file=trade_file)
logger.info("Adjusted Start Date and End Date based on Holidays")
return trade_file
class get_backtest_files():
def __init__(self, trade_file, funding_source, output_hsci_trade_file_path, output_hsci_backtest_file_path):
self.trade_file = trade_file
self.funding_source = funding_source
self.output_hsci_trade_file_path = output_hsci_trade_file_path
self.output_hsci_backtest_file_path = output_hsci_backtest_file_path
def run(self):
def reconstruct_price_data(price_data):
price_data = price_data.unstack().reset_index()
price_data.columns = ["bbg_ticker", "item_name", "date", "item_value"]
price_data["item_value"] = price_data["item_value"].astype("float")
price_data["date"] = price_data["date"].astype("datetime64[ns]")
return price_data
def adjust_start_end_date_based_on_trade_data(this_trade_df, price_data):
# halt flag dataframe
active_df = price_data[price_data["item_name"] == "volume"].copy()
active_df = active_df.dropna(subset=["item_value"])
active_stock = list(active_df["bbg_ticker"].drop_duplicates())
halt_list = [stock for stock in this_stock_list if stock not in active_stock]
halt_df = pd.DataFrame(index=halt_list).reset_index().rename(columns={"index": "bbg_ticker"})
halt_df["halt_flag"] = True
logger.info("Got Halt Flag")
# ipo or delist dataframe
start_end_date_df = active_df.groupby(["bbg_ticker"])["date"].agg(["min", "max"])
ipo_df = start_end_date_df[start_end_date_df["min"] != start_date].reset_index().rename(
columns={"min": "ipo_date"}).drop(columns="max")
delist_df = start_end_date_df[start_end_date_df["max"] != end_date].reset_index().rename(
columns={"max": "delist_date"}).drop(columns="min")
logger.info("Got IPO Date and Delist Date")
# ipo return
ipo_return_list = []
if not ipo_df.empty:
for ticker in list(ipo_df["bbg_ticker"].drop_duplicates()):
ipo_return = list(price_data[(price_data["item_name"] == "last_price") &
(price_data["bbg_ticker"] == ticker)].sort_values("date")[
"item_value"].dropna())[:2]
ipo_return = (ipo_return[-1] / ipo_return[0] - 1) * 100
ipo_return_list.append(ipo_return)
ipo_df["ipo_return"] = ipo_return_list
logger.info("Got IPO Return")
# get adjusted trade df
if not halt_df.empty:
this_trade_df = pd.merge(this_trade_df, halt_df, on=["bbg_ticker"], how="left")
this_trade_df["halt_flag"] = this_trade_df["halt_flag"].fillna(False)
else:
this_trade_df["halt_flag"] = False
if not ipo_df.empty:
this_trade_df = pd.merge(this_trade_df, ipo_df, on=["bbg_ticker"], how="left")
else:
this_trade_df["ipo_date"] = pd.NaT
this_trade_df["ipo_return"] = np.nan
if not delist_df.empty:
this_trade_df = pd.merge(this_trade_df, delist_df, on=["bbg_ticker"], how="left")
else:
this_trade_df["delist_date"] = pd.NaT
this_trade_df["trade_start_date"] = [trade_start_date if
|
pd.isnull(ipo_date)
|
pandas.isnull
|
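# Illustrative sketch of the holiday adjustment performed by adjust_holiday above
# (hypothetical date and country; assumes the `holidays` package and pandas):
#     import holidays
#     import pandas as pd
#     from pandas.tseries.offsets import BDay
#     us_holidays = holidays.CountryHoliday("US")
#     start = pd.Timestamp("2021-07-05")   # Independence Day (observed) in the US
#     while start in us_holidays:
#         start += BDay(1)
#     # start is now the first business day that is not a holiday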
from datetime import date
import numpy as np
import pandas as pd
import pkgutil
# static file path
calendar = pkgutil.get_data(__name__, "Workbook/FY_Cal.xlsx")
def date_helper(date, return_value):
cal_workbook = (calendar)
df1 = pd.read_excel(cal_workbook)
df1['Date'] = pd.to_datetime(df1['Date'])
| pandas.to_datetime |
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from gs_quant.timeseries import *
def test_first():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = first(x)
expected = pd.Series([1.0, 1.0, 1.0, 1.0], index=dates)
assert_series_equal(result, expected, obj="First")
def test_last():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = last(x)
expected = pd.Series([4.0, 4.0, 4.0, 4.0], index=dates)
assert_series_equal(result, expected, obj="Last")
y = pd.Series([1.0, 2.0, 3.0, np.nan], index=dates)
result = last(y)
expected = pd.Series([3.0, 3.0, 3.0, 3.0], index=dates)
assert_series_equal(result, expected, obj="Last non-NA")
def test_last_value():
with pytest.raises(MqValueError):
last_value(pd.Series())
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=(pd.date_range("2020-01-01", periods=4, freq="D")))
assert last_value(x) == 4.0
y = pd.Series([5])
assert last_value(y) == 5
y = pd.Series([1.0, 2.0, 3.0, np.nan], index=(pd.date_range("2020-01-01", periods=4, freq="D")))
assert last_value(y) == 3.0
def test_count():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = count(x)
expected = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
assert_series_equal(result, expected, obj="Count")
def test_compare():
dates1 = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
dates2 = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
]
x = pd.Series([1.0, 2.0, 2.0, 4.0], index=dates1)
y = pd.Series([2.0, 1.0, 2.0], index=dates2)
expected = pd.Series([-1.0, 1.0, 0.0], index=dates2)
result = compare(x, y, method=Interpolate.INTERSECT)
assert_series_equal(expected, result, obj="Compare series intersect")
expected = pd.Series([1.0, -1.0, 0], index=dates2)
result = compare(y, x, method=Interpolate.INTERSECT)
assert_series_equal(expected, result, obj="Compare series intersect 2")
expected = pd.Series([-1.0, 1.0, 0, 0], index=dates1)
result = compare(x, y, method=Interpolate.NAN)
assert_series_equal(expected, result, obj="Compare series nan")
expected = pd.Series([-1.0, 1.0, 0, 1.0], index=dates1)
result = compare(x, y, method=Interpolate.ZERO)
assert_series_equal(expected, result, obj="Compare series zero")
expected = pd.Series([-1.0, 1.0, 0, 1.0], index=dates1)
result = compare(x, y, method=Interpolate.STEP)
assert_series_equal(expected, result, obj="Compare series step")
dates2 = [
date(2019, 1, 2),
date(2019, 1, 4),
date(2019, 1, 6),
]
dates1.append(date(2019, 1, 5))
xp = pd.Series([1, 2, 3, 4, 5], index=pd.to_datetime(dates1))
yp = pd.Series([1, 4, 0], index=pd.to_datetime(dates2))
result = compare(xp, yp, Interpolate.TIME)
dates1.append(date(2019, 1, 6))
expected = pd.Series([0.0, 1.0, 1.0, 0.0, 1.0, 0.0], index=pd.to_datetime(dates1))
assert_series_equal(result, expected, obj="Compare series greater time")
def test_diff():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = diff(x)
expected = pd.Series([np.nan, 1.0, 1.0, 1.0], index=dates)
assert_series_equal(result, expected, obj="Diff")
result = diff(x, 2)
expected = pd.Series([np.nan, np.nan, 2.0, 2.0], index=dates)
assert_series_equal(result, expected, obj="Diff")
empty = pd.Series([], index=[])
result = diff(empty)
assert (len(result) == 0)
def test_lag():
dates = pd.date_range("2019-01-01", periods=4, freq="D")
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = lag(x, '1m')
expected = pd.Series([1.0, 2.0, 3.0, 4.0], index=pd.date_range("2019-01-31", periods=4, freq="D"))
| pandas.date_range |
import os
import uuid
from datetime import datetime
from time import sleep
import fsspec
import pandas as pd
import pytest
import v3iofs
from storey import EmitEveryEvent
import mlrun
import mlrun.feature_store as fs
from mlrun import store_manager
from mlrun.datastore.sources import CSVSource, ParquetSource
from mlrun.datastore.targets import CSVTarget, NoSqlTarget, ParquetTarget
from mlrun.features import Entity
from tests.system.base import TestMLRunSystem
@TestMLRunSystem.skip_test_if_env_not_configured
# Marked as enterprise because of v3io mount and remote spark
@pytest.mark.enterprise
class TestFeatureStoreSparkEngine(TestMLRunSystem):
project_name = "fs-system-spark-engine"
spark_service = ""
pq_source = "testdata.parquet"
csv_source = "testdata.csv"
spark_image_deployed = (
False # Set to True if you want to avoid the image building phase
)
test_branch = "" # For testing specific branch. e.g.: "https://github.com/mlrun/mlrun.git@development"
@classmethod
def _init_env_from_file(cls):
env = cls._get_env_from_file()
cls.spark_service = env["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"]
def get_local_pq_source_path(self):
return os.path.relpath(str(self.assets_path / self.pq_source))
def get_remote_pq_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.pq_source
return path
def get_local_csv_source_path(self):
return os.path.relpath(str(self.assets_path / self.csv_source))
def get_remote_csv_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.csv_source
return path
def custom_setup(self):
from mlrun import get_run_db
from mlrun.run import new_function
from mlrun.runtimes import RemoteSparkRuntime
self._init_env_from_file()
if not self.spark_image_deployed:
store, _ = store_manager.get_or_create_store(
self.get_remote_pq_source_path()
)
store.upload(
self.get_remote_pq_source_path(without_prefix=True),
self.get_local_pq_source_path(),
)
store, _ = store_manager.get_or_create_store(
self.get_remote_csv_source_path()
)
store.upload(
self.get_remote_csv_source_path(without_prefix=True),
self.get_local_csv_source_path(),
)
if not self.test_branch:
RemoteSparkRuntime.deploy_default_image()
else:
sj = new_function(
kind="remote-spark", name="remote-spark-default-image-deploy-temp"
)
sj.spec.build.image = RemoteSparkRuntime.default_image
sj.with_spark_service(spark_service="dummy-spark")
sj.spec.build.commands = ["pip install git+" + self.test_branch]
sj.deploy(with_mlrun=False)
get_run_db().delete_function(name=sj.metadata.name)
self.spark_image_deployed = True
def test_basic_remote_spark_ingest(self):
key = "patient_id"
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
assert measurements.status.targets[0].run_id is not None
def test_basic_remote_spark_ingest_csv(self):
key = "patient_id"
name = "measurements"
measurements = fs.FeatureSet(
name,
entities=[fs.Entity(key)],
engine="spark",
)
source = CSVSource(
"mycsv", path=self.get_remote_csv_source_path(), time_field="timestamp"
)
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
features = [f"{name}.*"]
vec = fs.FeatureVector("test-vec", features)
resp = fs.get_offline_features(vec)
df = resp.to_dataframe()
assert type(df["timestamp"][0]).__name__ == "Timestamp"
def test_error_flow(self):
df = pd.DataFrame(
{
"name": ["Jean", "Jacques", "Pierre"],
"last_name": ["Dubois", "Dupont", "Lavigne"],
}
)
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity("name")],
engine="spark",
)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fs.ingest(
measurements,
df,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
def test_ingest_to_csv(self):
key = "patient_id"
csv_path_spark = "v3io:///bigdata/test_ingest_to_csv_spark"
csv_path_storey = "v3io:///bigdata/test_ingest_to_csv_storey.csv"
measurements = fs.FeatureSet(
"measurements_spark",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_spark)]
fs.ingest(
measurements,
source,
targets,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
csv_path_spark = measurements.get_target_path(name="csv")
measurements = fs.FeatureSet(
"measurements_storey",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_storey)]
fs.ingest(
measurements,
source,
targets,
)
csv_path_storey = measurements.get_target_path(name="csv")
read_back_df_spark = None
file_system = fsspec.filesystem("v3io")
for file_entry in file_system.ls(csv_path_spark):
filepath = file_entry["name"]
if not filepath.endswith("/_SUCCESS"):
read_back_df_spark = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_spark is not None
read_back_df_storey = None
for file_entry in file_system.ls(csv_path_storey):
filepath = file_entry["name"]
read_back_df_storey = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_storey is not None
assert read_back_df_spark.sort_index(axis=1).equals(
read_back_df_storey.sort_index(axis=1)
)
@pytest.mark.parametrize("partitioned", [True, False])
def test_schedule_on_filtered_by_time(self, partitioned):
name = f"sched-time-{str(partitioned)}"
now = datetime.now()
path = "v3io:///bigdata/bla.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 10:00:00"),
pd.Timestamp("2021-01-10 11:00:00"),
],
"first_name": ["moshe", "yosi"],
"data": [2000, 10],
}
).to_parquet(path=path, filesystem=fsys)
cron_trigger = "*/3 * * * *"
source = ParquetSource(
"myparquet", path=path, time_field="time", schedule=cron_trigger
)
feature_set = fs.FeatureSet(
name=name,
entities=[fs.Entity("first_name")],
timestamp_key="time",
engine="spark",
)
if partitioned:
targets = [
NoSqlTarget(),
ParquetTarget(
name="tar1",
path="v3io:///bigdata/fs1/",
partitioned=True,
partition_cols=["time"],
),
]
else:
targets = [
ParquetTarget(
name="tar2", path="v3io:///bigdata/fs2/", partitioned=False
),
NoSqlTarget(),
]
fs.ingest(
feature_set,
source,
run_config=fs.RunConfig(local=False),
targets=targets,
spark_context=self.spark_service,
)
# ingest starts every third minute and it can take ~150 seconds to finish.
time_till_next_run = 180 - now.second - 60 * (now.minute % 3)
sleep(time_till_next_run + 150)
features = [f"{name}.*"]
vec = fs.FeatureVector("sched_test-vec", features)
with fs.get_online_feature_service(vec) as svc:
resp = svc.get([{"first_name": "yosi"}, {"first_name": "moshe"}])
assert resp[0]["data"] == 10
assert resp[1]["data"] == 2000
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 12:00:00"),
pd.Timestamp("2021-01-10 13:00:00"),
now + pd.Timedelta(minutes=10),
pd.Timestamp("2021-01-09 13:00:00"),
],
"first_name": ["moshe", "dina", "katya", "uri"],
"data": [50, 10, 25, 30],
}
).to_parquet(path=path)
sleep(180)
resp = svc.get(
[
{"first_name": "yosi"},
{"first_name": "moshe"},
{"first_name": "katya"},
{"first_name": "dina"},
{"first_name": "uri"},
]
)
assert resp[0]["data"] == 10
assert resp[1]["data"] == 50
assert resp[2] is None
assert resp[3]["data"] == 10
assert resp[4] is None
# check offline
resp = fs.get_offline_features(vec)
assert len(resp.to_dataframe()) == 4
assert "uri" not in resp.to_dataframe() and "katya" not in resp.to_dataframe()
def test_aggregations(self):
name = f"measurements_{uuid.uuid4()}"
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
df = pd.DataFrame(
{
"time": [
test_base_time,
test_base_time + pd.Timedelta(minutes=1),
test_base_time + pd.Timedelta(minutes=2),
test_base_time + pd.Timedelta(minutes=3),
test_base_time + pd.Timedelta(minutes=4),
],
"first_name": ["moshe", "yosi", "yosi", "moshe", "yosi"],
"last_name": ["cohen", "levi", "levi", "cohen", "levi"],
"bid": [2000, 10, 11, 12, 16],
"mood": ["bad", "good", "bad", "good", "good"],
}
)
path = "v3io:///bigdata/test_aggregations.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
df.to_parquet(path=path, filesystem=fsys)
source = ParquetSource("myparquet", path=path, time_field="time")
data_set = fs.FeatureSet(
f"{name}_storey",
entities=[Entity("first_name"), Entity("last_name")],
)
data_set.add_aggregation(
column="bid",
operations=["sum", "max"],
windows="1h",
period="10m",
)
df = fs.ingest(data_set, source, targets=[])
assert df.to_dict() == {
"mood": {("moshe", "cohen"): "good", ("yosi", "levi"): "good"},
"bid": {("moshe", "cohen"): 12, ("yosi", "levi"): 16},
"bid_sum_1h": {("moshe", "cohen"): 2012, ("yosi", "levi"): 37},
"bid_max_1h": {("moshe", "cohen"): 2000, ("yosi", "levi"): 16},
"time": {
("moshe", "cohen"): pd.Timestamp("2020-07-21 21:43:00Z"),
("yosi", "levi"):
|
pd.Timestamp("2020-07-21 21:44:00Z")
|
pandas.Timestamp
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected =
|
Series({'int64': 4})
|
pandas.Series
|
"""
run_model.py
Purpose: Predict gene expression given graph structure and node features
Usage: python ./run_model.py [-c <str>] [-e <int>] [-lr <float>] [-cn <int>]
    [-gs <int>] [-ln <int>] [-ls <int>]
Arguments:
'-c', '--cell_line', default='E116', type=str)
'-e', '--max_epoch', default=1000,type=int)
'-lr', '--learning_rate', default=1e-4, type=float)
'-cn', '--num_graph_conv_layers', default=2, type=int)
'-gs', '--graph_conv_embed_size', default=256, type=int)
'-ln', '--num_lin_layers', default=3, type=int)
'-ls', '--lin_hidden_layer_size', default=256, type=int)
Processed inputs:
In ./data/cell_line subdirectory:
./hic_sparse.npz: Concatenated Hi-C matrix in sparse CSR format
./np_nodes_lab_genes.npy: Numpy array stored in binary format
2-column array that stores IDs of nodes corresponding to genes
and the node label (expression level)
./np_hmods_norm.npy: Numpy array stored in binary format
(F+1)-column array where the 0th column contains node IDs
and columns 1..F contain feature values, where F = total number of features
./df_genes.pkl: Pandas dataframe stored in .pkl format
5-column dataframe, where columns = [ENSEMBL ID,
gene name abbreviation, node ID, expression level, connected status]
*Note: Users can prepare these files or use process_inputs.py script provided
Outputs:
In ./data/cell_line/saved_runs subdirectory:
model_[date_and_time].pt: Model state dictionary stored in .pt (PyTorch) format
model_predictions_[date_and_time].csv: Predictions for each gene
Columns: [Dataset,Node ID, ENSEMBL ID, gene name abbreviation,
true label, predicted label, classification [TP/TN/FP/FN]]
model_[date_and_time]_info.txt: Text file containing summary of model
statistics (test AUC, F1 scores) as well as hyperparameter settings
"""
import os
import argparse
import time
from datetime import datetime, date
import random
import numpy as np
from scipy.sparse import load_npz
from sklearn.metrics import roc_auc_score, f1_score
import pandas as pd
import torch
import torch_geometric
import torch.nn.functional as F
import torch.nn as nn
from sage_conv_cat import SAGEConvCat
class GCN(nn.Module):
def __init__(self, num_feat, num_graph_conv_layers, graph_conv_embed_sizes, num_lin_layers, lin_hidden_sizes, num_classes):
'''
Defines model class
Parameters
----------
        num_feat [int]: Feature dimension
        num_graph_conv_layers [int]: Number of graph convolutional layers (1, 2, or 3)
        graph_conv_embed_sizes [tuple[int]]: Per-layer embedding sizes of the graph convolutional layers
        num_lin_layers [int]: Number of linear layers (1, 2, or 3)
        lin_hidden_sizes [tuple[int]]: Per-layer sizes of the hidden linear layers
        num_classes [int]: Number of classes to be predicted (2)
Returns
-------
None.
'''
super(GCN, self).__init__()
self.num_graph_conv_layers = num_graph_conv_layers
self.num_lin_layers = num_lin_layers
self.dropout_value = 0
if self.num_graph_conv_layers == 1:
self.conv1 = SAGEConvCat(num_feat, graph_conv_embed_sizes[0])
elif self.num_graph_conv_layers == 2:
self.conv1 = SAGEConvCat(num_feat, graph_conv_embed_sizes[0])
self.conv2 = SAGEConvCat(graph_conv_embed_sizes[0], graph_conv_embed_sizes[1])
elif self.num_graph_conv_layers == 3:
self.conv1 = SAGEConvCat(num_feat, graph_conv_embed_sizes[0])
self.conv2 = SAGEConvCat(graph_conv_embed_sizes[0], graph_conv_embed_sizes[1])
self.conv3 = SAGEConvCat(graph_conv_embed_sizes[1], graph_conv_embed_sizes[2])
if self.num_lin_layers == 1:
self.lin1 = nn.Linear(graph_conv_embed_sizes[-1], num_classes)
elif self.num_lin_layers == 2:
self.lin1 = nn.Linear(graph_conv_embed_sizes[-1], lin_hidden_sizes[0])
self.lin2 = nn.Linear(lin_hidden_sizes[0], num_classes)
elif self.num_lin_layers == 3:
self.lin1 = nn.Linear(graph_conv_embed_sizes[-1], lin_hidden_sizes[0])
self.lin2 = nn.Linear(lin_hidden_sizes[0], lin_hidden_sizes[1])
self.lin3 = nn.Linear(lin_hidden_sizes[1], num_classes)
self.loss_calc = nn.CrossEntropyLoss()
self.torch_softmax = nn.Softmax(dim=1)
def forward(self, x, edge_index, train_status=False):
'''
Forward function.
Parameters
----------
x [tensor]: Node features
edge_index [tensor]: Subgraph mask
train_status [bool]: optional, set to True for dropout
Returns
-------
scores [tensor]: Un-normalized class scores
'''
if self.num_graph_conv_layers == 1:
h = self.conv1(x, edge_index)
h = torch.relu(h)
elif self.num_graph_conv_layers == 2:
h = self.conv1(x, edge_index)
h = torch.relu(h)
h = self.conv2(h, edge_index)
h = torch.relu(h)
elif self.num_graph_conv_layers == 3:
h = self.conv1(x, edge_index)
h = torch.relu(h)
h = self.conv2(h, edge_index)
h = torch.relu(h)
h = self.conv3(h, edge_index)
h = torch.relu(h)
dropout_value = 0.5
if self.num_lin_layers == 1:
scores = self.lin1(h)
elif self.num_lin_layers == 2:
scores = self.lin1(h)
scores = torch.relu(scores)
scores = F.dropout(scores, p = dropout_value, training=train_status)
scores = self.lin2(scores)
elif self.num_lin_layers == 3:
scores = self.lin1(h)
scores = torch.relu(scores)
scores = F.dropout(scores, p = dropout_value, training=train_status)
scores = self.lin2(scores)
scores = torch.relu(scores)
scores = self.lin3(scores)
return scores
def loss(self, scores, labels):
'''
Calculates cross-entropy loss
Parameters
----------
scores [tensor]: Un-normalized class scores from forward function
labels [tensor]: Class labels for nodes
Returns
-------
xent_loss [tensor]: Cross-entropy loss
'''
xent_loss = self.loss_calc(scores, labels)
return xent_loss
def calc_softmax_pred(self, scores):
'''
Calculates softmax scores and predicted classes
Parameters
----------
scores [tensor]: Un-normalized class scores
Returns
-------
softmax [tensor]: Probability for each class
predicted [tensor]: Predicted class
'''
softmax = self.torch_softmax(scores)
predicted = torch.argmax(softmax, 1)
return softmax, predicted
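# Illustrative only: a minimal instantiation sketch of the GCN class above; it mirrors the
# hyperparameters assembled further below, where per-layer sizes are passed as tuples.
def _example_build_gcn():
    embed_sizes = (256, 256)           # one entry per graph convolutional layer
    hidden_sizes = (256, 256, 256)     # one entry per linear layer
    return GCN(num_feat=5, num_graph_conv_layers=2, graph_conv_embed_sizes=embed_sizes,
               num_lin_layers=3, lin_hidden_sizes=hidden_sizes, num_classes=2)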
def train_model(net, graph, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer):
'''
Parameters
----------
net [GCN]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
max_epoch [int]: Maximum number of training epochs
learning_rate [float]: Learning rate
targetNode_mask [tensor]: Subgraph mask for training nodes
    train_idx [array]: Indices (into targetNode_mask) corresponding to the training set
    valid_idx [array]: Indices (into targetNode_mask) corresponding to the validation set
optimizer [PyTorch optimizer class]: PyTorch optimization algorithm
Returns
-------
train_loss_vec [array]: Training loss for each epoch
train_AUC_vec [array]: Training AUC score for each epoch
valid_loss_vec [array]: Validation loss for each epoch
valid_AUC_vec [array]: Validation AUC score for each epoch
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = net.to(device)
graph = graph.to(device)
optimizer = optimizer
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
train_loss_list = []
train_AUC_vec = np.zeros(np.shape(np.arange(max_epoch)))
valid_loss_list = []
valid_AUC_vec = np.zeros(np.shape(np.arange(max_epoch)))
model.train()
train_status = True
print('\n')
for e in list(range(max_epoch)):
if e%100 == 0:
print("Epoch", str(e), 'out of', str(max_epoch))
model.train()
train_status = True
optimizer.zero_grad()
### Only trains on nodes with genes due to masking
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
train_scores = forward_scores[train_idx]
train_loss = model.loss(train_scores, torch.LongTensor(train_labels).to(device))
train_softmax, _ = model.calc_softmax_pred(train_scores)
train_loss.backward()
optimizer.step()
### Calculate training and validation loss, AUC scores
model.eval()
valid_scores = forward_scores[valid_idx]
valid_loss = model.loss(valid_scores, torch.LongTensor(valid_labels).to(device))
valid_softmax, _ = model.calc_softmax_pred(valid_scores)
train_loss_list.append(train_loss.item())
train_softmax = to_cpu_npy(train_softmax)
train_AUC = roc_auc_score(train_labels, train_softmax[:,1], average="micro")
valid_loss_list.append(valid_loss.item())
valid_softmax = to_cpu_npy(valid_softmax)
valid_AUC = roc_auc_score(valid_labels, valid_softmax[:,1], average="micro")
train_AUC_vec[e] = train_AUC
valid_AUC_vec[e] = valid_AUC
train_loss_vec = np.reshape(np.array(train_loss_list), (-1, 1))
valid_loss_vec = np.reshape(np.array(valid_loss_list), (-1, 1))
return train_loss_vec, train_AUC_vec, valid_loss_vec, valid_AUC_vec
def eval_model(net, graph, targetNode_mask, train_idx, valid_idx, test_idx):
'''
Run final model and compute evaluation statistics post-training
Parameters
----------
net [GCN]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
targetNode_mask [tensor]: Mask ensuring model only trains on nodes with genes
    train_idx [array]: Indices (into targetNode_mask) corresponding to the training set;
        analogous for valid_idx and test_idx
Returns
-------
    test_AUC [float]: Test set AUC score;
    test_pred [array]: Test set predictions;
        analogously for train_pred (training set) and valid_pred (validation set)
    test_labels [array]: Test set labels;
        analogously for train_labels (training set) and valid_labels (validation set)
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = net.to(device)
graph = graph.to(device)
test_labels = to_cpu_npy(graph.y[targetNode_mask[test_idx]])
model.eval()
train_status=False
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
test_scores = forward_scores[test_idx]
test_softmax, test_pred = model.calc_softmax_pred(test_scores)
test_softmax = to_cpu_npy(test_softmax)
test_pred = to_cpu_npy(test_pred)
test_AUC = roc_auc_score(test_labels, test_softmax[:,1], average="micro")
test_F1 = f1_score(test_labels, test_pred, average="micro")
train_scores = forward_scores[train_idx]
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
train_softmax, train_pred = model.calc_softmax_pred(train_scores)
train_pred = to_cpu_npy(train_pred)
train_F1 = f1_score(train_labels, train_pred, average="micro")
valid_scores = forward_scores[valid_idx]
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
valid_softmax, valid_pred = model.calc_softmax_pred(valid_scores)
valid_pred = to_cpu_npy(valid_pred)
valid_F1 = f1_score(valid_labels, valid_pred, average="micro")
return test_AUC, test_F1, test_pred, test_labels, train_F1, train_pred, train_labels, \
valid_F1, valid_pred, valid_labels
def to_cpu_npy(x):
'''
Simple helper function to transfer GPU tensors to CPU numpy matrices
Parameters
----------
x [tensor]: PyTorch tensor stored on GPU
Returns
-------
new_x [array]: Numpy array stored on CPU
'''
new_x = x.cpu().detach().numpy()
return new_x
###Set options and random seed
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cell_line', default='E116', type=str)
parser.add_argument('-e', '--max_epoch', default=1000,type=int)
parser.add_argument('-lr', '--learning_rate', default=1e-4, type=float)
parser.add_argument('-cn', '--num_graph_conv_layers', default=2, type=int)
parser.add_argument('-gs', '--graph_conv_embed_size', default=256, type=int)
parser.add_argument('-ln', '--num_lin_layers', default=3, type=int)
parser.add_argument('-ls', '--lin_hidden_layer_size', default=256, type=int)
args = parser.parse_args()
cell_line = args.cell_line
max_epoch = args.max_epoch
learning_rate = args.learning_rate
num_graph_conv_layers = args.num_graph_conv_layers
graph_conv_embed_sz = args.graph_conv_embed_size
num_lin_layers = args.num_lin_layers
lin_hidden_size = args.lin_hidden_layer_size
random_seed = random.randint(0,10000)
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
###Initialize start time
start_time = time.time()
today = date.today()
mdy = today.strftime("%Y-%m-%d")
clock = datetime.now()
hms = clock.strftime("%H-%M-%S")
hm = clock.strftime("%Hh-%Mm")
hm_colon = clock.strftime("%H:%M")
date_and_time = mdy + '-at-' + hms
###Test for GPU availability
cuda_flag = torch.cuda.is_available()
if cuda_flag:
dev = "cuda"
else:
dev = "cpu"
device = torch.device(dev)
###Load input files
base_path = os.getcwd()
save_dir = os.path.join(base_path, 'data', cell_line, 'saved_runs')
hic_sparse_mat_file = os.path.join(base_path, 'data', cell_line, 'hic_sparse.npz')
np_nodes_lab_genes_file = os.path.join(base_path, 'data', cell_line, 'np_nodes_lab_genes.npy')
np_hmods_norm_all_file = os.path.join(base_path, 'data', cell_line, 'np_hmods_norm.npy')
df_genes_file = os.path.join(base_path, 'data', cell_line, 'df_genes.pkl')
df_genes = pd.read_pickle(df_genes_file)
###Print model specifications
print(os.path.basename(__file__))
print('Model date and time:')
print(date_and_time, '\n\n')
print('Cell line:', cell_line)
print('\n')
print('Training set: 70%')
print('Validation set: 15%')
print('Testing set: 15%')
print('\n')
print('Model hyperparameters: ')
print('Number of epochs:', max_epoch)
print('Learning rate:', learning_rate)
print('Number of graph convolutional layers:', str(num_graph_conv_layers))
print('Graph convolutional embedding size:', graph_conv_embed_sz)
print('Number of linear layers:', str(num_lin_layers))
print('Linear hidden layer size:', lin_hidden_size)
###Define model inputs
num_feat = 5
num_classes = 2
mat = load_npz(hic_sparse_mat_file)
allNodes_hms = np.load(np_hmods_norm_all_file)
hms = allNodes_hms[:, 1:] #only includes features, not node ids
allNodes = allNodes_hms[:, 0].astype(int)
geneNodes_labs = np.load(np_nodes_lab_genes_file)
geneNodes = geneNodes_labs[:, -2].astype(int)
geneLabs = geneNodes_labs[:, -1].astype(int)
allLabs = 2*np.ones(np.shape(allNodes))
allLabs[geneNodes] = geneLabs
targetNode_mask = torch.tensor(geneNodes).long()
X = torch.tensor(hms).float().reshape(-1, 5)
Y = torch.tensor(allLabs).long()
extract = torch_geometric.utils.from_scipy_sparse_matrix(mat)
data = torch_geometric.data.Data(edge_index = extract[0], edge_attr = extract[1], x = X, y = Y)
G = data
num_feat = 5
graph_conv_embed_sizes = (graph_conv_embed_sz,)*num_graph_conv_layers
lin_hidden_sizes = (lin_hidden_size,)*num_lin_layers
###Randomize node order and split into 70%/15%/15% training/validation/test sets
pred_idx_shuff = torch.randperm(targetNode_mask.shape[0])
fin_train = np.floor(0.7*pred_idx_shuff.shape[0]).astype(int)
fin_valid = np.floor(0.85*pred_idx_shuff.shape[0]).astype(int)
train_idx = pred_idx_shuff[:fin_train]
valid_idx = pred_idx_shuff[fin_train:fin_valid]
test_idx = pred_idx_shuff[fin_valid:]
train_gene_ID = targetNode_mask[train_idx].numpy()
valid_gene_ID = targetNode_mask[valid_idx].numpy()
test_gene_ID = targetNode_mask[test_idx].numpy()
###Instantiate neural network model
net = GCN(num_feat, num_graph_conv_layers, graph_conv_embed_sizes, num_lin_layers, lin_hidden_sizes, num_classes)
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, net.parameters()),
lr = learning_rate)
### Print model's state_dict
print("\n"+"Model's state_dict:")
for param_tensor in net.state_dict():
print(param_tensor, "\t", net.state_dict()[param_tensor].size())
### Train model
train_loss_vec, train_AUC_vec, valid_loss_vec, valid_AUC_vec = train_model(net, G, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer)
### Evaluate model
test_AUC, test_F1, test_pred, test_labels, train_F1, train_pred, train_labels, \
valid_F1, valid_pred, valid_labels = \
eval_model(net, G, targetNode_mask, train_idx, valid_idx, test_idx)
### Save metrics and node predictions
train_metrics = [train_gene_ID, train_pred, train_labels, train_AUC_vec, train_loss_vec]
valid_metrics = [valid_gene_ID, valid_pred, valid_labels, valid_AUC_vec, valid_loss_vec]
test_metrics = [test_gene_ID, test_pred, test_labels, test_AUC, ['na']]
dataset_list = [train_metrics, valid_metrics, test_metrics]
df_full_metrics =
|
pd.DataFrame(columns=['Dataset','Node ID','True Label','Predicted Label','Classification'])
|
pandas.DataFrame
|
import argparse
from glob import glob
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import numpy as np
# RGB 255, 127, 14: orange reference colour reused in the CLIJ palette below
colors_other=np.array([[31,119,180], [44,160,44], [127,0,255], [0,0,255]])/255
colors_clij=np.array([[255,127,14], [214,39,40], [204,0,102], [204,204,0]])/255
markers_other=['o', 'x', 'o', 'x']
markers_clij=['o', 'x', 'o', 'x']
def get_extended_dataframe(path_to_file="./foo.csv"):
df = pandas.read_csv(path_to_file)
df.insert(loc=0, column="Filename", value=path_to_file.split("/")[-1])
return df
def create_master_dataframe(file_names="./*"):
"""a function to create a master dataframe with filenames and there contents"""
file_list = glob(file_names)
frame_list=[]
for f in file_list:
frame_list.append(get_extended_dataframe(f))
return pandas.concat(frame_list, axis=0, sort=False)
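# Illustrative only: hypothetical usage of create_master_dataframe with a made-up glob pattern.
# Every row keeps track of its originating csv via the "Filename" column added in get_extended_dataframe.
def _example_master_dataframe(pattern="./benchmark_results/*.csv"):
    master = create_master_dataframe(pattern)
    return master.groupby("Filename").size()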
def cond_and(list_of_conds):
    """Element-wise AND of a list of boolean conditions (e.g. pandas Series of equal length)."""
    res = np.ones(len(list_of_conds[0]), dtype=bool)
    for cond in list_of_conds:
        res = res & np.asarray(cond)
    return res
def cond_or(list_of_conds):
    """Element-wise OR of a list of boolean conditions (e.g. pandas Series of equal length)."""
    res = np.zeros(len(list_of_conds[0]), dtype=bool)
    for cond in list_of_conds:
        res = res | np.asarray(cond)
    return res
def plot_errorbar(x, y, e, benchmark, param, color, marker):
plt.errorbar(x,y,e, marker=marker, label=benchmark, color=color, markerfacecolor='None', markersize=10)
plt.fill_between(x, y-e, y+e, alpha=0.5, color=color)
def process_benchmark_versus_param(data_frame, free_param_name, legend_entry, color, marker):
correction_factor = 1e-9 # convert to seconds
# df_bm = data_frame[data_frame["Benchmark"]==benchmark]
# print(df_bm)
x = data_frame[free_param_name]
y = data_frame["Score"]*correction_factor
e = data_frame["Score Error (99.9%)"]*correction_factor
plot_errorbar(x, y, e, legend_entry, free_param_name.replace("Param: ", ""), color, marker)
def process_one_param(data_frame, param_dict, free_param_name, name, benchmarks_to_process, save_dir, verbose=0):
if verbose:
print("------> processing param %s..."%free_param_name)
# list of figures to return
figures = []
# creating the param dictionary without the free params
param_dict_iter = param_dict.copy()
del param_dict_iter[free_param_name]
param_names = list(param_dict_iter.keys())
benchmarks = pandas.unique(data_frame["Benchmark"])
machines = pandas.unique(data_frame["Filename"].str.split(".").str.get(0).str.split("_").str.get(-2))
for params in itertools.product(*param_dict_iter.values()):
fig = plt.figure(figsize=(9, 7), dpi=80)
ax = fig.add_subplot(111)
# plt.clf()
if verbose:
print("-----> other param values " + str(params))
df = data_frame
suff=''
# build the right data frame
for i,p in enumerate(params):
if np.isnan(p):
df = df[np.isnan(df[param_names[i]])]
else:
df = df[df[param_names[i]]==p]
suff+="_%s_"%param_names[i].replace("Param: ", "") + str(p)
id_clij=0
id_other=0
for m in machines:
d = df[df["Filename"].str.split(".").str.get(0).str.split("_").str.get(-2)==m]
# d = d
for j, b in enumerate(benchmarks):
dl = d[d["Benchmark"]==b]
c=None
bs = b.split(".")[-1]
# print(b)
if (("clij" in bs) or ("CLIJ" in bs)):
c = colors_clij[id_clij%4]
mark = markers_clij[id_clij%4]
id_clij+=1
else:
c = colors_other[id_other%4]
mark = markers_other[id_other%4]
id_other+=1
print(d)
benchmark_prefix = "net.haesleinhuepf.clij.benchmark.jmh."
benchmark_short = b.replace(benchmark_prefix, "")
process_benchmark_versus_param(dl, free_param_name, m+"_"+benchmark_short, color=c, marker=mark)
title = name.split("_")[-1]
plt.title(title)
x_axis_label = free_param_name.replace("Param: ","")
if (x_axis_label == "size"):
x_axis_label = "Size / MB"
if (title == 'GaussianBlur2D' or title == 'GaussianBlur3D'):
if (x_axis_label == "radius"):
x_axis_label = "sigma"
plt.xlabel(x_axis_label)
plt.ylabel("Processing time / s")
plt.tight_layout()
if (save_dir != None):
path = "%s/%s.pdf"%(save_dir.strip("/"), name+suff)
plt.savefig(path)
path = "%s/%s.png" % (save_dir.strip("/"), name + suff)
plt.savefig(path)
# fig.savefig(path)
plt.legend(bbox_to_anchor=(1.0, -0.15,), loc=1,
ncol=len(benchmarks), fontsize=10, columnspacing=1)
plt.tight_layout()
if (save_dir != None):
path = "%s/%s_legend.png"%(save_dir.strip("/"), name+suff)
plt.savefig(path)
# fig.savefig(path)
figures.append(fig)
plt.show()
plt.close()
return figures
def plt_setup():
sns.set_style('whitegrid')
font = {'family' : 'normal', 'size' : 22}
plt.rc('font', **font)
# plt.figure(figsize=(15, 15), dpi=80)
def process_frame(data_frame, name=None, save_dir=None, verbose=0):
plt_setup()
figures=[]
if verbose:
print("----- processing a new frame -----")
# identifying operations
operations = pandas.unique(data_frame["Benchmark"].str.split(".").str.get(-2))
if verbose:
print("----- detected operations:")
for o in operations:
print(o)
# identifying machines
machines = pandas.unique(data_frame["Filename"].str.split(".").str.get(0).str.split("_").str.get(-2))
if verbose:
print("----- detected machines:")
for m in machines:
print(m)
# identifying benchmarks
benchmarks = pandas.unique(data_frame["Benchmark"].str.split(".").str.get(-1))
if verbose:
print("----- detected benchmarks:")
for b in benchmarks:
print(b)
# identifying params
params = [col for col in data_frame.columns if col.startswith("Param")]
if verbose:
print("----- detected params:")
for p in params:
print(p)
# building a param_dict
param_dict={}
for param in params:
x =
|
pandas.unique(data_frame[param])
|
pandas.unique
|
import pandas as pd
import numpy as np
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import RepeatedKFold, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from scripts.python.routines.betas import betas_drop_na
import pickle
import random
import plotly.express as px
import copy
import statsmodels.formula.api as smf
from scripts.python.pheno.datasets.filter import filter_pheno
from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict
from scripts.python.routines.plot.scatter import add_scatter_trace
from scipy.stats import mannwhitneyu
import plotly.graph_objects as go
import pathlib
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.layout import add_layout, get_axis
from scripts.python.routines.plot.p_value import add_p_value_annotation
from statsmodels.stats.multitest import multipletests
dataset = "GSEUNN"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
status_col = get_column_name(dataset, 'Status').replace(' ','_')
age_col = get_column_name(dataset, 'Age').replace(' ','_')
sex_col = get_column_name(dataset, 'Sex').replace(' ','_')
status_dict = get_status_dict(dataset)
status_passed_fields = status_dict['Control'] + status_dict['Case']
sex_dict = get_sex_dict(dataset)
continuous_vars = {}
categorical_vars = {status_col: [x.column for x in status_passed_fields], sex_col: list(sex_dict.values())}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas =
|
pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
|
pandas.read_pickle
|
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Aug 22, 2018
"""
import matplotlib.colors
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import interp as scipy_interp
from scipy.stats import gaussian_kde
import warnings
from sklearn import metrics
# TODO: propensity distribution using CDF (and not reflecting if so)
# TODO: Make plot "names" (as in the string which are their common name in the code) global variables in this module
# instead of "magics" in the Evaluator module
# TODO: consider making plots to not rely on pandas input (and can work more generally with numpy)?
# TODO: consider refactoring each type (family?) of plots to its own module (unify through __init__?)
# TODO: consider making the Evaluator able to plot each plot separately?
# TODO: consider making plot module be class-based instead, taking its argument during init
# and having a `plot()` interface
def _calculate_mutual_bins(x, y, bins="auto"):
"""
A common support for two vectors.
Args:
x (pd.Series):
y (pd.Series):
bins: compatible with numpy's bins parameter.
Returns:
np.array: bins cutoffs.
"""
data = np.append(x, y)
bins = np.histogram(data, bins=bins)[1]
return bins
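# Illustrative only: _calculate_mutual_bins returns a single edge set spanning both (made-up) vectors.
def _example_mutual_bins():
    x = pd.Series([0.1, 0.2, 0.4, 0.8])
    y = pd.Series([0.3, 0.5, 0.9])
    return _calculate_mutual_bins(x, y, bins=4)  # 5 bin edges covering the combined range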
def plot_counterfactual_common_support(prediction, a, ax=None):
cv = [np.arange(a.shape[0])]
ax = plot_counterfactual_common_support_folds([prediction], hue_by=a, cv=cv, ax=ax)
return ax
def plot_counterfactual_common_support_folds(predictions, hue_by, cv, alpha_by_density=True, ax=None):
"""Plot the scatter plot of y0 vs. y1 for multiple scoring results, colored by the treatment
Args:
predictions (list[pd.Series]): List, the size of number of folds, of outcome prediction values.
hue_by (pd.Series): Group assignment (as in treatment assignment) of the entire dataset.
(indices from `cv` will be used to slice this vector)
cv (list[np.array]): List, the size of number of folds, of row indices (as in iloc locations) - the indices
of samples participating the fold.
alpha_by_density (bool): Whether to calculate points alpha value (transparent-opaque) with density estimation.
This can take some time to compute for large number of points.
If False, alpha calculation will be a simple fast heuristic.
ax (plt.Axes): The axes on which the plot will be displayed. Optional.
"""
effect_folds = [(prediction.iloc[:, 1] - prediction.iloc[:, 0]).mean() for prediction in predictions]
predictions = pd.concat(predictions) # type: pd.DataFrame
treatment = pd.concat([hue_by.iloc[fold_idx] for fold_idx in cv]) # type: pd.Series
ax = _scatter_hue(predictions.iloc[:, 0], predictions.iloc[:, 1], treatment, alpha_by_density, ax=ax)
effect_label = r"mean effect={:.2g}".format(np.mean(effect_folds))
effect_label += r"$\pm${:.2g}".format(np.std(effect_folds)) if len(effect_folds) > 1 else ""
ax.plot([], [], color=ax.get_facecolor(), # Use background color
label=effect_label)
_add_diagonal(ax)
ax.legend(loc="best")
ax.set_xlabel(r"Predicted $Y^0$")
ax.set_ylabel(r"Predicted $Y^1$")
ax.set_title("Predicted Common Support")
return ax
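# Illustrative only: hypothetical fold inputs for plot_counterfactual_common_support_folds,
# namely two folds of two-column (Y^0, Y^1) outcome predictions over a toy 6-sample dataset.
def _example_common_support_folds():
    rng = np.random.RandomState(1)
    a = pd.Series([0, 1, 0, 1, 0, 1])
    cv = [np.array([0, 1, 2]), np.array([3, 4, 5])]
    predictions = [pd.DataFrame(rng.normal(size=(3, 2)), index=fold_idx) for fold_idx in cv]
    return plot_counterfactual_common_support_folds(predictions, hue_by=a, cv=cv,
                                                    alpha_by_density=False)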
def plot_continuous_prediction_accuracy(predictions, y, a, alpha_by_density=True, ax=None):
cv = [np.arange(a.shape[0])]
ax = plot_continuous_prediction_accuracy_folds([predictions], y, a, cv, alpha_by_density, ax=ax,
plot_residuals=False)
return ax
def plot_continuous_prediction_accuracy_folds(predictions, y, a, cv, alpha_by_density=True, plot_residuals=False,
ax=None):
# Concatenate data across folds:
treatments = []
outcomes = []
predictions_on_actual = []
r2_scores = []
for fold_prediction, fold_idx in zip(predictions, cv):
fold_a = a.iloc[fold_idx]
fold_y = y.iloc[fold_idx]
if plot_residuals:
fold_y = fold_y - fold_prediction
r2_scores.append(metrics.r2_score(fold_y, fold_prediction))
treatments.append(fold_a)
outcomes.append(fold_y)
predictions_on_actual.append(fold_prediction)
treatments = pd.concat(treatments) # type: pd.Series
outcomes = pd.concat(outcomes) # type: pd.Series
predictions_on_actual = pd.concat(predictions_on_actual) # type: pd.Series
ax = _scatter_hue(predictions_on_actual, outcomes, treatments, alpha_by_density, ax)
# R-squared label:
if not plot_residuals:
r2_label = r"$R^2={:.2f}".format(np.mean(r2_scores))
r2_label += r"\pm{:.2f}$".format(np.std(r2_scores)) if len(r2_scores) > 1 else "$"
ax.plot([], [], color=ax.get_facecolor(), label=r2_label) # invisible color so as to not show line in legend
_add_diagonal(ax)
ax.legend(loc="best")
ax.set_xlabel("Predicted values")
ax.set_ylabel("Prediction residuals" if plot_residuals else "True values")
ax.set_title("Residual Plot" if plot_residuals else "Continuous Accuracy Plot")
return ax
def plot_residual_folds(predictions, y, a, cv, alpha_by_density=True, ax=None):
ax = plot_continuous_prediction_accuracy_folds(predictions, y, a, cv, alpha_by_density, plot_residuals=True,
ax=ax)
ax.axhline(0.0, linestyle="--", color="grey", zorder=0, alpha=0.75)
return ax
def plot_residual(predictions, y, a, alpha_by_density=True, ax=None):
cv = [np.arange(a.shape[0])]
ax = plot_residual_folds([predictions], y, a, cv, alpha_by_density, ax)
return ax
def _scatter_hue(x, y, hue, alpha_by_density=True, ax=None):
ax = ax or plt.gca()
points_rgba = _get_alpha_per_point_with_density(X=[x, y], hue=hue) if alpha_by_density else None
for i, treatment_val in enumerate(np.sort(np.unique(hue))):
idx_mask = hue == treatment_val # type: pd.Series
cur_color = points_rgba.loc[idx_mask].values if points_rgba is not None else None
cur_alpha = np.clip(10 / np.sqrt(idx_mask.sum()), 0.01, 1)
ax.scatter(x=x.loc[idx_mask], y=y.loc[idx_mask],
alpha=cur_alpha if points_rgba is None else None,
facecolor=cur_color, edgecolors="none",
label="treatment={}".format(treatment_val))
return ax
def _get_alpha_per_point_with_density(X, hue, min_alpha_bound=0.3, max_alpha_bound=1.0):
"""
Matplotlib does not support pointwise alpha values (rather, constant value for an entire plt.plot()).
This function will utilize a supported pointwise color-scheme, using rgba, and passing the individual alpha values
as the 4th dimension ('a') of the rgba.
Args:
X: in a form compatible with statsmodels' KDEMultivariate (list of pd.Series, or pd.DataFrame)
hue (pd.Series): A vector with group assignment for each point in x.
min_alpha_bound (float | None): Value between 0 and 1, used to linearly rescale the alpha values.
If None, rescale is avoided.
Default of 0.3, since lower values are usually too unobservable.
max_alpha_bound (float | None): Value between 0 and 1, used to linearly rescale the alpha values.
If None, rescale is avoided.
    Returns:
        pd.DataFrame: Per-point RGBA values (columns "r", "g", "b", "a"), where the alpha channel
            encodes the inverse local density of each point.
    """
points_rgba = pd.DataFrame(index=hue.index, columns=list("rgba"), dtype=np.float64)
# Calculate alpha for each point based on its density:
kde = sm.nonparametric.KDEMultivariate(data=X, var_type='cc', bw="normal_reference")
# kde.bw = kde.bw * 0.5 # Rescale bandwidth to be narrower
points_density = kde.pdf(X)
points_alpha = 1 / points_density # Invert values - the denser the point -> the lower its alpha (more transparent)
if (min_alpha_bound is not None) and (max_alpha_bound is not None):
# Rescale alphas (linearly) to the range of 0.3 to 1:
points_alpha = (min_alpha_bound + (max_alpha_bound - min_alpha_bound) *
((points_alpha - points_alpha.min()) / (points_alpha.max() - points_alpha.min())))
points_rgba["a"] = points_alpha # Assign the alpha values
for i, hue_val in enumerate(np.sort(np.unique(hue))):
idx_mask = hue == hue_val
cur_color = "C{}".format(i) # Cycle through the colors
cur_color = matplotlib.colors.to_rgb(cur_color) # Get RGB value of the current color
points_rgba.loc[idx_mask, ["r", "g", "b"]] = cur_color # Assign that constant RGB val for all current points
return points_rgba
def plot_calibration_folds(predictions, targets, cv, n_bins=10, plot_se=True,
plot_rug=False, plot_histogram=False, quantile=False, ax=None):
"""Plot calibration curves for multiple models (presumably in folds)
Args:
predictions (list[pd.Series]): list (each entry of a fold) of arrays - probability ("scores") predictions.
targets (pd.Series): true labels to calibrate against on the overall data (not divided to folds).
cv (list[np.array]):
n_bins (int): number of bins to evaluate in the plot
plot_se (bool): Whether to plot standard errors around the mean bin-probability estimation.
plot_rug:
plot_histogram:
quantile (bool): If true, the binning of the calibration curve is by quantiles. Default is false
ax (plt.Axes): Optional
Returns:
"""
for i, idx_fold in enumerate(cv):
predictions_fold = predictions[i]
target_fold = targets.iloc[idx_fold]
ax = _plot_calibration_single(y_true=target_fold, y_prob=predictions_fold, n_bins=n_bins, plot_diagonal=False,
plot_se=plot_se, plot_rug=plot_rug, plot_histogram=plot_histogram,
quantile=quantile, label="fold {}".format(i), ax=ax)
_add_diagonal(ax)
ax.legend(loc="best")
# ax.set_title("{} Calibration".format("Propensity" if y is None else "Outcome"))
ax.set_title("Calibration")
return ax
def plot_calibration(predictions, targets, n_bins=10, plot_se=True,
plot_rug=False, plot_histogram=True, quantile=False, ax=None):
cv = [np.arange(predictions.shape[0])]
return plot_calibration_folds([predictions], targets, cv=cv, n_bins=n_bins, plot_se=plot_se,
plot_rug=plot_rug, plot_histogram=plot_histogram, quantile=quantile, ax=ax)
def _plot_calibration_single(y_true, y_prob, n_bins=10, plot_diagonal=True,
plot_se=True, plot_rug=False, plot_histogram=False,
quantile=False, label=None, ax=None):
"""Plot a calibration curve showing how well y_prob predicts the probability of a binary outcome y
The standard deviation of a binomial distribution p(1-p)/sqrt(n) is used to calculate the values for which p
would be one standard deviation away. This means we are looking for
r +/- sqrt(r(1-r)/n) = p
This provides a cubic equation for r whose solution is
r = (2np+1 +/- sqrt(4np(1-p)+1)) / (2n+2)
Args:
y_prob (pd.Series):
y_true (pd.Series):
n_bins (int): the number of bins to use for the calibration plot
plot_se (bool): Whether to plot standard errors around the
mean bin-probability estimation.
plot_diagonal (bool): Whether to plot a diagonal line or not.
plot_rug (bool): Whether to plot rug of the prediction
plot_histogram (bool): Whether to plot histogram at the background.
quantile (bool): If False specifies equal sized bins,
if True splits the probabilities into n_bins quantiles.
ax (plt.Axes):
label(str): The label for the plotted line
Returns:
"""
ax = ax or plt.gca()
if quantile:
bins = np.unique(np.percentile(y_prob, np.linspace(0, 100, n_bins + 1).astype(int)))
bins = bins if len(bins) > 1 else np.concatenate([bins, bins]) # in case all values of y_prob are the same
bins[-1] += 1e-8
prob_true, prob_pred, counts = calibration_curve(y_true, y_prob, bins=bins)
else:
prob_true, prob_pred, counts = calibration_curve(y_true, y_prob, bins=n_bins)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
if plot_rug:
ax.plot(y_prob, np.full_like(y_prob, 0.01), "|", color="black", alpha=0.7)
line_color = None
if plot_histogram:
hist_line = ax.plot(bins, (counts / counts.sum()), drawstyle="steps-post", alpha=0.8)
hist_line = hist_line[0]
hist_line.set_zorder(2) # keep histogram behind any new lines that are plotted after it.
line_color = hist_line.get_color() # if plotting hist, keep track of color to use in the line to be plotted
if plot_diagonal:
_add_diagonal(ax)
lines = ax.plot(prob_pred, prob_true, "s-", color=line_color, label=label)
# Plot standard error:
if plot_se:
disc = (4 * counts * prob_true) * (1 - prob_true) + 1
upper = (2 * counts * prob_true + 1 + np.sqrt(disc)) / (2 * counts + 2)
lower = (2 * counts * prob_true + 1 - np.sqrt(disc)) / (2 * counts + 2)
ax.fill_between(x=prob_pred, y1=lower, y2=upper, color=lines[-1].get_color(), alpha=0.5)
ax.set_xlabel('Predicted probability')
ax.set_ylabel('Observed probability')
return ax
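# Illustrative only: a quick numerical check of the standard-error bounds derived in the docstring
# above; each root r of the closed-form solution satisfies |p - r| == sqrt(r(1-r)/n).
def _example_check_se_bounds(p=0.3, n=50):
    disc = (4 * n * p) * (1 - p) + 1
    upper = (2 * n * p + 1 + np.sqrt(disc)) / (2 * n + 2)
    lower = (2 * n * p + 1 - np.sqrt(disc)) / (2 * n + 2)
    return [abs(abs(p - r) - np.sqrt(r * (1 - r) / n)) < 1e-10 for r in (lower, upper)]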
def calibration_curve(y_true, y_prob, bins=5):
"""
Compute calibration curve of a classifier given its scores output and true label assignment.
Args:
y_true (pd.Series): True binary label assignment.
y_prob (pd.Series): Predicted probability of each sample being the positive label.
bins (int | list | np.ndarray | pd.Series):
If int, it defines the number of equal-width bins in the
given range (5, by default).
If bins a sequence, it defines the bin edges, including the
rightmost edge, allowing for non-uniform bin widths.
Returns:
(pd.Series, pd.Series, pd.Series): empirical_prob, predicted_prob, bin_counts
empirical_prob: The fraction of positive labels in each bins
predicted_prob: The average of predicted probability in each bin
bin_counts: The number of samples fallen in each bin
References:
[1] <NAME>., & <NAME>. (2002, July).
Transforming classifier scores into accurate multiclass probability estimates
"""
# Get binning out of provided bins
if type(bins) is int:
bins = np.linspace(0., 1. + 1e-8, bins + 1)
elif hasattr(bins, '__len__') and not isinstance(bins, str): # Some sort of vector
bins = np.sort(np.ravel(bins))
if y_prob.max() > bins.max() or y_prob.min() < bins.min():
raise ValueError("y_prob has values outside the provided bins")
else:
raise TypeError("bins must either be an integer or a sequence of scalars")
bin_of_samples = pd.cut(y_prob, bins, labels=np.arange(len(bins) - 1)).astype(int)
predicted_prob = y_prob.groupby(bin_of_samples).mean()
empirical_prob = y_true.groupby(bin_of_samples).mean()
bin_counts = bin_of_samples.value_counts(sort=False)
return empirical_prob, predicted_prob, bin_counts
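# Illustrative only: a hypothetical usage sketch of calibration_curve above, using synthetic scores.
def _example_calibration_curve_usage():
    rng = np.random.RandomState(0)
    y_prob = pd.Series(rng.uniform(0.0, 1.0, size=200))
    y_true = pd.Series((rng.uniform(0.0, 1.0, size=200) < y_prob).astype(int))
    return calibration_curve(y_true, y_prob, bins=5)  # (empirical_prob, predicted_prob, bin_counts)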
def plot_roc_curve_folds(curve_data, ax=None, plot_folds=False, label_folds=False, label_std=False, **kwards):
num_of_curves = len(curve_data.keys())
color_list = ["C{}".format(_) for _ in range(num_of_curves)]
for (curve_name, curve_data), color in zip(curve_data.items(), color_list):
fprs = curve_data["FPR"]
tprs = curve_data["TPR"]
aucs = curve_data["AUC"]
ax = _plot_single_performance_curve(fprs, tprs, aucs, "AUC",
color, curve_name,
label_std, label_folds, plot_folds, num_of_curves != 1,
ax)
# Plot chance curve:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', label='Chance', alpha=.8)
ax.set_xlim(left=-0.05, right=1.05)
ax.set_ylim(bottom=-0.05, top=1.05)
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_title("ROC Curve")
ax.legend(loc="lower right")
return ax
def plot_precision_recall_curve_folds(curve_data, ax=None,
plot_folds=False, label_folds=False, label_std=False, **kwards):
# TODO: Check why it does not end at class prevalence (for recall=1.0)
num_of_curves = len(curve_data.keys())
color_list = ["C{}".format(_) for _ in range(num_of_curves)]
pos_class_prevalence = curve_data.pop("prevalence", None)
for (curve_name, curve_data), color in zip(curve_data.items(), color_list):
recalls = curve_data["Recall"]
precisions = curve_data["Precision"]
aps = curve_data["AP"]
ax = _plot_single_performance_curve(recalls, precisions, aps, "AP",
color, curve_name,
label_std, label_folds, plot_folds, num_of_curves != 1,
ax)
# Plot chance curve:
if pos_class_prevalence is not None:
ax.plot([0, 1], [pos_class_prevalence, pos_class_prevalence],
linestyle='--', lw=2, color='black', label='Chance', alpha=.8)
ax.set_xlim(left=-0.05, right=1.05)
ax.set_ylim(bottom=-0.05, top=1.05)
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_title("PR Curve")
ax.legend(loc="lower left")
return ax
def _plot_single_performance_curve(xs, ys, areas, areas_type, color="C0", curve_name="",
label_std=False, label_folds=False,
plot_folds=False, colored_folds=False, ax=None):
ax = ax or plt.gca()
assert len(xs) == len(ys) == len(areas)
n_folds = len(xs)
x_domain = np.linspace(0, 1, 100)
ys_interp = []
for i in range(n_folds):
if areas_type == "AP": # precision/recall need to be reversed for interpolation
ys_interp.append(scipy_interp(x_domain, xs[i][::-1], ys[i][::-1]))
else:
ys_interp.append(scipy_interp(x_domain, xs[i], ys[i]))
ys_interp[-1][0] = 0.0
area = areas[i]
folds_label = 'Fold {} ({} = {:.2f})'.format(i, areas_type, area) if label_folds else None
if plot_folds:
folds_color = None if colored_folds else color # use multiple colors if plotting only one stratum
ax.plot(xs[i], ys[i], lw=1, alpha=0.3, color=folds_color,
label=folds_label)
# Plot main (folds average) curve
mean_ys = np.nanmean(ys_interp, axis=0)
# if areas_type == "AUC":
# mean_ys[-1] = 1.0
mean_area = np.nanmean(areas)
std_area = np.nanstd(areas)
ax.plot(x_domain, mean_ys, color=color,
label=r'{} ({} = {:.2f} $\pm$ {:.2f})'.format(curve_name, areas_type, mean_area, std_area),
lw=2, alpha=.9)
# Plot uncertainty around main curve:
ys_std = np.std(ys_interp, axis=0)
upper_ys = np.minimum(mean_ys + ys_std, 1)
lower_ys = np.maximum(mean_ys - ys_std, 0)
std_label = r'$\pm$ 1 std. dev.' if label_std else None
ax.fill_between(x_domain, lower_ys, upper_ys, color=color, alpha=.2, label=std_label)
return ax
def plot_propensity_score_distribution(propensity, treatment, reflect=True, kde=False,
cumulative=False, norm_hist=True, ax=None):
"""
Plot the distribution of propensity score
Args:
propensity (pd.Series):
treatment (pd.Series):
reflect (bool): Whether to plot second treatment group on the opposite sides of the x-axis.
This can only work if there are exactly two groups.
kde (bool): Whether to plot kernel density estimation
cumulative (bool): Whether to plot cumulative distribution.
norm_hist (bool): If False - use raw counts on the y-axis.
If kde=True, then norm_hist should be True as well.
ax (plt.Axes | None):
Returns:
"""
# assert propensity.index.symmetric_difference(a.index).size == 0
ax = ax or plt.gca()
if kde and not norm_hist:
warnings.warn("kde=True and norm_hist=False is not supported. Forcing norm_hist from False to True.")
norm_hist = True
bins = np.histogram(propensity, bins="auto")[1]
plot_params = dict(bins=bins, density=norm_hist, alpha=0.5, cumulative=cumulative)
unique_treatments = np.sort(np.unique(treatment))
for treatment_number, treatment_value in enumerate(unique_treatments):
cur_propensity = propensity.loc[treatment == treatment_value]
cur_color = "C{}".format(treatment_number)
ax.hist(cur_propensity, label="treatment = {}".format(treatment_value),
color=[cur_color], **plot_params)
if kde:
cur_kde = gaussian_kde(cur_propensity)
min_support = max(0, cur_propensity.values.min() - cur_kde.factor)
max_support = min(1, cur_propensity.values.max() + cur_kde.factor)
X_plot = np.linspace(min_support, max_support, 200)
if cumulative:
density = np.array([cur_kde.integrate_box_1d(X_plot[0], x_i) for x_i in X_plot])
ax.plot(X_plot, density, color=cur_color, )
else:
ax.plot(X_plot, cur_kde.pdf(X_plot), color=cur_color, )
if reflect:
if len(unique_treatments) != 2:
raise ValueError("Reflecting density across X axis can only be done for two groups. "
"This one has {}".format(len(unique_treatments)))
# Update line:
if kde:
last_line = ax.get_lines()[-1]
last_line.set_ydata(-1 * last_line.get_ydata())
# Update histogram bars:
idx_of_first_hist_rect = \
[patch.get_label() for patch in ax.patches].index('treatment = {}'.format(unique_treatments[-1]))
for patch in ax.patches[idx_of_first_hist_rect:]:
patch.set_height(-1 * patch.get_height())
# Re-set the view of axes:
ax.relim()
ax.autoscale()
# Remove negation sign from lower y-axis:
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: str(x) if x >= 0 else str(-x)))
ax.legend(loc="best")
x_type = "Propensity" if propensity.between(0, 1, inclusive="both").all() else "Weights"
ax.set_xlabel(x_type)
y_type = "Probability density" if norm_hist else "Counts"
ax.set_ylabel(y_type)
ax.set_title("{} Distribution".format(x_type))
return ax
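# Illustrative only: hypothetical usage of plot_propensity_score_distribution with synthetic
# propensity scores for two treatment groups, mirrored across the x-axis via reflect=True.
def _example_propensity_distribution_plot():
    rng = np.random.RandomState(0)
    treatment = pd.Series(rng.binomial(1, 0.5, size=500))
    propensity = pd.Series(np.clip(rng.normal(0.4 + 0.2 * treatment, 0.1), 0.01, 0.99))
    return plot_propensity_score_distribution(propensity, treatment, reflect=True, kde=False)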
def plot_propensity_score_distribution_folds(predictions, hue_by, cv,
reflect=True, kde=False, cumulative=False,
norm_hist=True, ax=None):
"""
Args:
        predictions (list[pd.Series]): Propensity predictions, one Series per fold.
        hue_by (pd.Series): Group (treatment) assignment of the entire dataset.
        cv (list[np.array]): Row indices of the samples participating in each fold.
reflect (bool): Whether to plot second treatment group on the opposite sides of the x-axis.
This can only work if there are exactly two groups.
kde (bool): Whether to plot kernel density estimation
cumulative (bool): Whether to plot cumulative distribution.
norm_hist (bool): If False - use raw counts on the y-axis.
If kde=True, then norm_hist should be True as well.
ax (plt.Axis):
Returns:
"""
propensity = pd.concat(predictions) # type: pd.Series
# treatment = hue_by # if train phase then there will be no duplication of records.
treatment = pd.concat([hue_by.iloc[fold_idx] for fold_idx in cv]) # type: pd.Series
ax = plot_propensity_score_distribution(propensity, treatment,
reflect=reflect, kde=kde, cumulative=cumulative,
norm_hist=norm_hist, ax=ax)
return ax
def plot_mean_features_imbalance_love_folds(table1_folds, cv=None, aggregate_folds=True,
thresh=None, plot_semi_grid=True, ax=None):
method_pretty_name = {"smd": "Standard Mean Difference",
"abs_smd": "Absolute Standard Mean Difference",
"ks": "Kolmogorov-Smirnov"}
ax = ax or plt.gca()
# Aggregate across folds. This will be used to determine order, and extreme values.
# Use this groupby trick: https://stackoverflow.com/a/25058102
aggregated_table1 =
|
pd.concat(table1_folds)
|
pandas.concat
|
#! python3
"""Process data acquired from the Malvern Mastersizer 2000. The csv output contains lots of factor information with the numeric data towards the end. A common feature of the classes and modules is to split thse datasets into associate 'head' and 'data' subsets so that the numerical data can be processed independantly."""
from os.path import join
import datetime
import pandas as pd
from ...database.csv import MetaData
from .psa import PSD
def csv_to_df(*paths):
"""Load an exported Mastersizer 2000 csv into a pandas dataframe. Two headers exist and dates should be parsed. Multiple paths to csv files can be entered with the results being concatenated together."""
get_df = lambda p: pd.read_csv(p, header = [0,1], parse_dates = [5,6])
df = pd.concat([get_df(i) for i in paths])
df.reset_index(drop=True, inplace=True)
return(df)
def timestamp_to_day(Series):
"""Convert a timestamp type pandas series to YY-mm-dd format"""
s = Series
fmt = pd.to_datetime(s.dt.strftime('%Y-%m-%d'))
return(fmt)
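# Illustrative only: hypothetical usage of timestamp_to_day on a made-up measurement-date Series.
def _example_timestamp_to_day():
    s = pd.Series(pd.to_datetime(['2019-03-04 13:22:01', '2019-03-05 08:00:00']))
    return timestamp_to_day(s)  # datetime64 values normalised to midnight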
class MS2000(PSD):
"""Create an object from a row of Mastersizer 2000 CSV."""
def __init__(self, data, metadata=pd.Series(dtype='float64'), replicates=None):
""""""
m, d = metadata, data
if not m.empty:
self._meta_to_self(m)
self.ms2000 = data
self.precision = 6
self.ms2000_replicates = replicates
self.replicates = [i.psd for i in replicates] if replicates else replicates
mm, frequency = self._get_psd()
self.psd = PSD(mm, frequency, self.precision, self.replicates)
def _meta_to_self(self, meta):
"""Add metadata as attributes of the current object."""
row = meta
for i in meta.keys():
v = getattr(row, i)
k = i.replace('[4, 3] - ','').replace('[3, 2] - ','')
k = k.replace('(0.1)','10').replace('(0.5)','50').replace('(0.9)','90')
k = k.rstrip()
k = k.replace(' ','_')
k = k.lower()
setattr(self, k, v)
def _get_psd(self):
"""Return data as a dataframe of size limits and frequency"""
df = self.ms2000.copy().reset_index()
df['index'] = df['index'].astype('float') / 1000
df.columns = ['mm', 'frequency']
df['frequency'] = [round(i,6) for i in df['frequency']]
df.dropna(inplace=True)
return(df.mm, df.frequency)
def clip_psd(self):
""""""
def get_date(self):
"""Return the analysis timestamp in a useful format."""
print(self.analysis_date_and_time)
class MS2000CSV(MS2000):
""""""
def __init__(self, *paths):
"""Import a Mastersizer 2000 exported CSV and split into data and metadata."""
df = self._load_csv(*paths)
self.meta, self.data = self._split_df(df)
self.idx = self.active = df.index
def _load_csv(self, *paths):
""""""
df = csv_to_df(*paths)
return(df)
def _split_df(self, df):
"""Split the raw dataframe into separate data and metadata dataframes"""
h2 = df.columns.get_level_values(1)
hix, dix = [], []
for n, i in enumerate(h2):
try:
float(i)
dix += [n]
except:
hix += [n]
h = df[df.columns[(hix)]]
d = df[df.columns[(dix)]]
h.columns = h.columns.droplevel(level = 1)
d.columns = d.columns.droplevel(level = 0)
return(h, d)
def name_to_idx(self, name):
"""Return indices of a given sample name from the complete dataset."""
sample_names = self.meta['Sample Name'].astype('str')
idx = sample_names[sample_names==str(name)].index
return(idx)
def date_to_idx(self, date):
"""Return indices of given sample date from the complete dataset."""
m = self.meta['Measurement date and time']
df =
|
pd.to_datetime(m)
|
pandas.to_datetime
|
import hashlib
import json
import logging
import os
from datetime import datetime
from io import BytesIO
from pathlib import Path
import ckanapi
import pandas as pd
import requests
from airflow import DAG
from airflow.models import Variable
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator, PythonOperator
from dateutil import parser
from utils import airflow_utils, ckan_utils
from utils_operators.slack_operators import task_success_slack_alert, task_failure_slack_alert, GenericSlackOperator
PACKAGE_ID = Path(os.path.abspath(__file__)).name.replace(".py", "")
ACTIVE_ENV = Variable.get("active_env")
SRC_FILE = "http://opendata.toronto.ca/shelter.support.housing.administration/toronto-shelter-system-flow/toronto_shelter_system_flow.csv" # noqa: E501
RESOURCE_NAME = "toronto-shelter-system-flow"
EXPECTED_COLUMNS = [
"date(mmm-yy)",
"population_group",
"returned_from_housing",
"returned_to_shelter",
"newly_identified",
"moved_to_housing",
"no_recent_shelter_use",
"actively_homeless",
"ageunder16",
"age16-24",
"age25-44",
"age45-64",
"age65over",
"gender_male",
"gender_female",
"gender_transgender,non-binary_or_two_spirit",
"population_group_percentage",
]
def send_failure_message():
airflow_utils.message_slack(
name=PACKAGE_ID,
message_type="error",
msg="Job not finished",
active_env=ACTIVE_ENV,
prod_webhook=ACTIVE_ENV == "prod",
)
with DAG(
PACKAGE_ID,
default_args=airflow_utils.get_default_args(
{
"on_failure_callback": task_failure_slack_alert,
"start_date": datetime(2020, 11, 24, 13, 35, 0),
"retries": 0,
# "retry_delay": timedelta(minutes=3),
}
),
description="Take toronto_shelter_system_flow.csv from NAS and insert to datastore",
schedule_interval="0 20 * * *",
catchup=False,
tags=["dataset"],
) as dag:
CKAN_CREDS = Variable.get("ckan_credentials_secret", deserialize_json=True)
CKAN = ckanapi.RemoteCKAN(**CKAN_CREDS[ACTIVE_ENV])
def send_success_msg(**kwargs):
ti = kwargs.pop("ti")
msg = ti.xcom_pull(task_ids="build_message")
airflow_utils.message_slack(
name=PACKAGE_ID,
message_type="success",
msg=msg,
prod_webhook=ACTIVE_ENV == "prod",
active_env=ACTIVE_ENV,
)
def get_file(**kwargs):
ti = kwargs.pop("ti")
tmp_dir = Path(ti.xcom_pull(task_ids="create_tmp_dir"))
response = requests.get(SRC_FILE)
file_content = response.content
data = pd.read_csv(BytesIO(file_content), encoding="latin1")
filename = "new_data_raw"
filepath = tmp_dir / f"{filename}.parquet"
logging.info(f"Read {data.shape[0]} records")
data.to_parquet(filepath, engine="fastparquet", compression=None)
file_last_modified = response.headers["last-modified"]
return {"path": filepath, "file_last_modified": file_last_modified}
def get_package():
return CKAN.action.package_show(id=PACKAGE_ID)
def is_resource_new(**kwargs):
ti = kwargs.pop("ti")
package = ti.xcom_pull(task_ids="get_package")
logging.info(f"resources found: {[r['name'] for r in package['resources']]}")
is_new = RESOURCE_NAME not in [r["name"] for r in package["resources"]]
if is_new:
return "resource_is_new"
return "resource_is_not_new"
def get_resource():
package = ckan_utils.get_package(ckan=CKAN, package_id=PACKAGE_ID)
resources = package["resources"]
resource = [r for r in resources if r["name"] == RESOURCE_NAME][0]
return resource
def backup_previous_data(**kwargs):
ti = kwargs.pop("ti")
package = ti.xcom_pull(task_ids="get_package")
backups = Path(Variable.get("backups_dir")) / PACKAGE_ID
resource_id = [r for r in package["resources"] if r["name"] == RESOURCE_NAME][
0
]["id"]
logging.info(f"Resource ID: {resource_id}")
record_count = CKAN.action.datastore_search(id=resource_id, limit=0)["total"]
datastore_response = CKAN.action.datastore_search(
id=resource_id, limit=record_count
)
records = datastore_response["records"]
if len(records) > 0:
logging.info(f"Example record retrieved: {json.dumps(records[0])}")
else:
logging.info("Datastore resource was empty")
data = pd.DataFrame(records)
logging.info(f"Columns: {data.columns.values}")
if "_id" in data.columns.values:
data = data.drop("_id", axis=1)
data_hash = hashlib.md5()
data_hash.update(
data.sort_values(by=[c for c in data.columns.values])
.to_csv(index=False)
.encode("utf-8")
)
unique_id = data_hash.hexdigest()
logging.info(f"Unique ID generated: {unique_id}")
data_path = backups / f"data.{unique_id}.parquet"
if not data_path.exists():
data.to_parquet(data_path, engine="fastparquet", compression=None)
fields = [f for f in datastore_response["fields"] if f["id"] != "_id"]
fields_path = backups / f"fields.{unique_id}.json"
if not fields_path.exists():
with open(fields_path, "w") as f:
json.dump(fields, f)
return {
"fields": fields_path,
"data": data_path,
"records": data.shape[0],
"columns": data.shape[1],
}
def get_new_data_unique_id(**kwargs):
ti = kwargs.pop("ti")
data_fp = Path(ti.xcom_pull(task_ids="transform_data"))
data = pd.read_parquet(data_fp)
data_hash = hashlib.md5()
data_hash.update(
data.sort_values(by=[c for c in data.columns.values])
.round(10)
.to_csv(index=False)
.encode("utf-8")
)
return data_hash.hexdigest()
def is_data_new(**kwargs):
ti = kwargs.pop("ti")
data_to_load_unique_id = ti.xcom_pull(task_ids="get_new_data_unique_id")
backups = Path(Variable.get("backups_dir")) / PACKAGE_ID
for f in os.listdir(backups):
if not os.path.isfile(backups / f):
continue
logging.info(f"File in backups: {f}")
if os.path.isfile(backups / f) and data_to_load_unique_id in f:
logging.info(
f"Data has already been loaded, ID: {data_to_load_unique_id}"
)
return "data_is_not_new"
logging.info(f"Data has not been loaded, new ID: {data_to_load_unique_id}")
return "data_is_new"
def delete_previous_records(**kwargs):
ti = kwargs.pop("ti")
resource_id = ti.xcom_pull(task_ids="get_resource")["id"]
backup = ti.xcom_pull(task_ids="backup_previous_data")
if backup is not None:
CKAN.action.datastore_delete(id=resource_id, filters={})
record_count = CKAN.action.datastore_search(id=resource_id, limit=0)[
"total"
]
msg = f"Records in resource after cleanup: {record_count}"
else:
msg = "No backups found, nothing to delete"
logging.info(msg)
def transform_data(**kwargs):
ti = kwargs.pop("ti")
tmp_dir = Path(ti.xcom_pull(task_ids="create_tmp_dir"))
data_fp = Path(ti.xcom_pull(task_ids="get_file")["path"])
data = pd.read_parquet(data_fp)
data = data.dropna(axis=0, how="all")
data = data[[c for c in data.columns if "unnamed" not in c.lower()]]
data = data.rename(columns={name: name.strip() for name in data.columns})
data[[c for c, d in data.dtypes.items() if d.name == "float64"]] = (
data[[c for c, d in data.dtypes.items() if d.name == "float64"]]
.fillna(0)
.astype("int64")
)
data["population_group_percentage"] = (
data["population_group_percentage"].str.replace("%", "").astype("float64")
)
filename = "new_data_transformed"
filepath = tmp_dir / f"{filename}.parquet"
data.to_parquet(filepath, engine="fastparquet", compression=None)
return filepath
def validate_expected_columns(**kwargs):
ti = kwargs.pop("ti")
data_fp = Path(ti.xcom_pull(task_ids="transform_data"))
df =
|
pd.read_parquet(data_fp)
|
pandas.read_parquet
|
import itertools
import re
import os
import time
import copy
import json
import Amplo
import joblib
import shutil
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Union
from pathlib import Path
from datetime import datetime
from shap import TreeExplainer
from shap import KernelExplainer
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from Amplo import Utils
from Amplo.AutoML.Sequencer import Sequencer
from Amplo.AutoML.Modeller import Modeller
from Amplo.AutoML.DataSampler import DataSampler
from Amplo.AutoML.DataExplorer import DataExplorer
from Amplo.AutoML.DataProcessor import DataProcessor
from Amplo.AutoML.DriftDetector import DriftDetector
from Amplo.AutoML.FeatureProcessor import FeatureProcessor
from Amplo.AutoML.IntervalAnalyser import IntervalAnalyser
from Amplo.Classifiers.StackingClassifier import StackingClassifier
from Amplo.Documenting import BinaryDocumenting
from Amplo.Documenting import MultiDocumenting
from Amplo.Documenting import RegressionDocumenting
from Amplo.GridSearch import BaseGridSearch
from Amplo.GridSearch import HalvingGridSearch
from Amplo.GridSearch import OptunaGridSearch
from Amplo.Observation import DataObserver
from Amplo.Observation import ProductionObserver
from Amplo.Regressors.StackingRegressor import StackingRegressor
class Pipeline:
def __init__(self, **kwargs):
"""
Automated Machine Learning Pipeline for tabular data.
Designed for predictive maintenance applications, failure identification, failure prediction, condition
monitoring, etc.
Parameters
----------
Main Parameters:
main_dir [str]: Main directory of Pipeline (for documentation)
target [str]: Column name of the output/dependent/regressand variable.
name [str]: Name of the project (for documentation)
version [int]: Pipeline version (set automatically)
mode [str]: 'classification' or 'regression'
objective [str]: from sklearn metrics and scoring
Data Processor:
int_cols [list[str]]: Column names of integer columns
float_cols [list[str]]: Column names of float columns
date_cols [list[str]]: Column names of datetime columns
cat_cols [list[str]]: Column names of categorical columns
missing_values [str]: [DataProcessing] - 'remove', 'interpolate', 'mean' or 'zero'
outlier_removal [str]: [DataProcessing] - 'clip', 'boxplot', 'z-score' or 'none'
z_score_threshold [int]: [DataProcessing] If outlier_removal = 'z-score', the threshold is adaptable
include_output [bool]: Whether to include output in the training data (sensible only with sequencing)
Feature Processor:
extract_features [bool]: Whether to use FeatureProcessing module
information_threshold : [FeatureProcessing] Threshold for removing co-linear features
feature_timeout [int]: [FeatureProcessing] Time budget for feature processing
max_lags [int]: [FeatureProcessing] Maximum lags for lagged features to analyse
max_diff [int]: [FeatureProcessing] Maximum differencing order for differencing features
Interval Analyser:
interval_analyse [bool]: Whether to use IntervalAnalyser module
Note that this has no effect when data from ``self._read_data`` is not multi-indexed
Sequencing:
sequence [bool]: [Sequencing] Whether to use Sequence module
seq_back [int or list[int]]: Input time indices
If list -> includes all integers within the list
If int -> includes that many samples back
seq_forward [int or list[int]]: Output time indices
If list -> includes all integers within the list.
If int -> includes that many samples forward.
seq_shift [int]: Shift input / output samples in time
seq_diff [int]: Difference the input & output, 'none', 'diff' or 'log_diff'
seq_flat [bool]: Whether to return a flat matrix (True) or a tensor (False)
Modelling:
standardize [bool]: Whether to standardize input/output data
shuffle [bool]: Whether to shuffle the samples during cross-validation
cv_splits [int]: How many cross-validation splits to make
store_models [bool]: Whether to store all trained model files
Grid Search:
grid_search_type [Optional[str]]: Which method to use 'optuna', 'halving', 'base' or None
grid_search_time_budget : Time budget for grid search
grid_search_candidates : Parameter evaluation budget for grid search
grid_search_iterations : Model evaluation budget for grid search
Stacking:
stacking [bool]: Whether to create a stacking model at the end
Production:
preprocess_function [str]: Add custom code for the prediction function, useful for production. Will be executed
with exec, can be multiline. Uses data as input.
Flags:
logging_level [Optional[Union[int, str]]]: Logging level for warnings, info, etc.
plot_eda [bool]: Whether to run Exploratory Data Analysis
process_data [bool]: Whether to force data processing
document_results [bool]: Whether to force documenting
no_dirs [bool]: Whether to create files or not
verbose [int]: Level of verbosity
"""
# Copy arguments
##################
# Main Settings
self.mainDir = kwargs.get('main_dir', 'AutoML/')
self.target = re.sub('[^a-z0-9]', '_', kwargs.get('target', '').lower())
self.name = kwargs.get('name', 'AutoML')
self.version = kwargs.get('version', None)
self.mode = kwargs.get('mode', None)
self.objective = kwargs.get('objective', None)
# Data Processor
self.intCols = kwargs.get('int_cols', None)
self.floatCols = kwargs.get('float_cols', None)
self.dateCols = kwargs.get('date_cols', None)
self.catCols = kwargs.get('cat_cols', None)
self.missingValues = kwargs.get('missing_values', 'zero')
self.outlierRemoval = kwargs.get('outlier_removal', 'clip')
self.zScoreThreshold = kwargs.get('z_score_threshold', 4)
self.includeOutput = kwargs.get('include_output', False)
# Balancer
self.balance = kwargs.get('balance', True)
# Feature Processor
self.extractFeatures = kwargs.get('extract_features', True)
self.informationThreshold = kwargs.get('information_threshold', 0.999)
self.featureTimeout = kwargs.get('feature_timeout', 3600)
self.maxLags = kwargs.get('max_lags', 0)
self.maxDiff = kwargs.get('max_diff', 0)
# Interval Analyser
self.useIntervalAnalyser = kwargs.get('interval_analyse', True)
# Sequencer
self.sequence = kwargs.get('sequence', False)
self.sequenceBack = kwargs.get('seq_back', 1)
self.sequenceForward = kwargs.get('seq_forward', 1)
self.sequenceShift = kwargs.get('seq_shift', 0)
self.sequenceDiff = kwargs.get('seq_diff', 'none')
self.sequenceFlat = kwargs.get('seq_flat', True)
# Modelling
self.standardize = kwargs.get('standardize', False)
self.shuffle = kwargs.get('shuffle', True)
self.cvSplits = kwargs.get('cv_splits', 10)
self.storeModels = kwargs.get('store_models', False)
# Grid Search Parameters
self.gridSearchType = kwargs.get('grid_search_type', 'optuna')
self.gridSearchTimeout = kwargs.get('grid_search_time_budget', 3600)
self.gridSearchCandidates = kwargs.get('grid_search_candidates', 250)
self.gridSearchIterations = kwargs.get('grid_search_iterations', 3)
# Stacking
self.stacking = kwargs.get('stacking', False)
# Production
self.preprocessFunction = kwargs.get('preprocess_function', None)
# Flags
self.plotEDA = kwargs.get('plot_eda', False)
self.processData = kwargs.get('process_data', True)
self.documentResults = kwargs.get('document_results', True)
self.verbose = kwargs.get('verbose', 0)
self.noDirs = kwargs.get('no_dirs', False)
# Checks
assert self.mode in [None, 'regression', 'classification'], 'Supported modes: regression, classification.'
assert 0 < self.informationThreshold < 1, 'Information threshold needs to be within [0, 1]'
assert self.maxLags < 50, 'Max_lags too big. Max 50.'
assert self.maxDiff < 5, 'Max diff too big. Max 5.'
assert self.gridSearchType is None \
or self.gridSearchType.lower() in ['base', 'halving', 'optuna'], \
'Grid Search Type must be Base, Halving, Optuna or None'
# Advices
if self.includeOutput and not self.sequence:
warnings.warn('[AutoML] IMPORTANT: it is strongly advised not to include output without sequencing.')
# Create dirs
if not self.noDirs:
self._create_dirs()
self._load_version()
# Store Pipeline Settings
self.settings = {'pipeline': kwargs, 'validation': {}, 'feature_set': ''}
# Objective & Scorer
self.scorer = None
if self.objective is not None:
assert isinstance(self.objective, str), 'Objective needs to be a string'
assert self.objective in metrics.SCORERS.keys(), 'Metric not supported, look at sklearn.metrics'
# Required sub-classes
self.dataSampler = DataSampler()
self.dataProcessor = DataProcessor()
self.dataSequencer = Sequencer()
self.featureProcessor = FeatureProcessor()
self.intervalAnalyser = IntervalAnalyser()
self.driftDetector = DriftDetector()
# Instance initiating
self.bestModel = None
self._data = None
self.featureSets = None
self.results = None
self.n_classes = None
self.is_fitted = False
# Monitoring
logging_level = kwargs.get('logging_level', 'INFO')
logging_dir = Path(self.mainDir) / 'app_logs.log' if not self.noDirs else None
self.logger = Utils.logging.get_logger('AutoML', logging_dir, logging_level, capture_warnings=True)
self._prediction_time = None
self._main_predictors = None
# User Pointing Functions
def get_settings(self, version: int = None) -> dict:
"""
Get settings to recreate fitted object.
Parameters
----------
version : int, optional
Production version, defaults to current version
"""
if version is None or version == self.version:
assert self.is_fitted, "Pipeline not yet fitted."
return self.settings
else:
settings_path = self.mainDir + f'Production/v{version}/Settings.json'
assert Path(settings_path).exists(), 'Cannot load settings from nonexistent version'
return json.load(open(settings_path, 'r'))
def load_settings(self, settings: dict):
"""
Restores a pipeline from settings.
Parameters
----------
settings [dict]: Pipeline settings
"""
# Set parameters
settings['pipeline']['no_dirs'] = True
self.__init__(**settings['pipeline'])
self.settings = settings
self.dataProcessor.load_settings(settings['data_processing'])
self.featureProcessor.load_settings(settings['feature_processing'])
# TODO: load_settings for IntervalAnalyser (not yet implemented)
if 'drift_detector' in settings:
self.driftDetector = DriftDetector(
num_cols=self.dataProcessor.float_cols + self.dataProcessor.int_cols,
cat_cols=self.dataProcessor.cat_cols,
date_cols=self.dataProcessor.date_cols
).load_weights(settings['drift_detector'])
def load_model(self, model: object):
"""
Restores a trained model
"""
assert type(model).__name__ == self.settings['model']
self.bestModel = model
self.is_fitted = True
def fit(self, *args, **kwargs):
"""
Fit the full AutoML pipeline.
1. Prepare data for training
2. Train / optimize models
3. Prepare Production Files
Nicely organises all required scripts / files to make a prediction
Parameters
----------
args
For data reading - Propagated to `self.data_preparation`
kwargs
For data reading (propagated to `self.data_preparation`) AND
for production filing (propagated to `self.conclude_fitting`)
"""
# Starting
print('\n\n*** Starting Amplo AutoML - {} ***\n\n'.format(self.name))
# Prepare data for training
self.data_preparation(*args, **kwargs)
# Train / optimize models
self.model_training(**kwargs)
# Conclude fitting
self.conclude_fitting(**kwargs)
def data_preparation(self, *args, **kwargs):
"""
Prepare data for modelling
1. Data Processing
Cleans all the data. See @DataProcessing
2. (optional) Exploratory Data Analysis
Creates a ton of plots which are helpful to improve predictions manually
3. Feature Processing
Extracts & Selects. See @FeatureProcessing
Parameters
----------
args
For data reading - Propagated to `self._read_data`
kwargs
For data reading - Propagated to `self._read_data`
"""
# Reading data
self._read_data(*args, **kwargs)
# Check data
obs = DataObserver(pipeline=self)
obs.observe()
# Detect mode (classification / regression)
self._mode_detector()
# Preprocess Data
self._data_processing()
# Run Exploratory Data Analysis
self._eda()
# Balance data
self._data_sampling()
# Sequence
self._sequencing()
# Extract and select features
self._feature_processing()
# Interval-analyze data
self._interval_analysis()
# Standardize
# Standardizing assures equal scales, equal gradients and no clipping.
# Therefore, it needs to be after sequencing & feature processing, as this alters scales
self._standardizing()
def model_training(self, **kwargs):
"""Train models
1. Initial Modelling
Runs various off the shelf models with default parameters for all feature sets
If Sequencing is enabled, this is where it happens, as here, the feature set is generated.
2. Grid Search
Optimizes the hyperparameters of the best performing models
3. (optional) Create Stacking model
4. (optional) Create documentation
Parameters
----------
kwargs : optional
Keyword arguments that will be passed to `self.grid_search`.
"""
# Run initial models
self._initial_modelling()
# Optimize Hyper parameters
self.grid_search(**kwargs)
# Create stacking model
self._create_stacking()
def conclude_fitting(self, *, model=None, feature_set=None, params=None, **kwargs):
"""
Prepare production files that are necessary to deploy a specific
model / feature set combination
Creates or modifies the following files
- ``Model.joblib`` (production model)
- ``Settings.json`` (model settings)
- ``Report.pdf`` (training report)
Parameters
----------
model : str or list of str, optional
Model file for which to prepare production files. If multiple, selects the best.
feature_set : str or list of str, optional
Feature set for which to prepare production files. If multiple, selects the best.
params : dict, optional
Model parameters for which to prepare production files.
Default: takes the best parameters
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
"""
# Set up production path
prod_dir = self.mainDir + f'Production/v{self.version}/'
Path(prod_dir).mkdir(exist_ok=True)
# Parse arguments
model, feature_set, params = self._parse_production_args(model, feature_set, params)
# Verbose printing
if self.verbose > 0:
print(f'[AutoML] Preparing Production files for {model}, {feature_set}, {params}')
# Set best model (`self.bestModel`)
self._prepare_production_model(prod_dir + 'Model.joblib', model, feature_set, params)
# Set and store production settings
self._prepare_production_settings(prod_dir + 'Settings.json', model, feature_set, params)
# Observe production
# TODO[TS, 25.05.2022]: Currently, we are observing the data also here.
# However, in a future version we probably will only observe the data
# directly after :func:`_read_data()`. For now we wait...
obs = ProductionObserver(pipeline=self)
obs.observe()
self.settings['production_observation'] = obs.observations
# Report
report_path = self.mainDir + f'Documentation/v{self.version}/{model}_{feature_set}.pdf'
if not Path(report_path).exists():
self.document(self.bestModel, feature_set)
shutil.copy(report_path, prod_dir + 'Report.pdf')
# Finish
self.is_fitted = True
print('[AutoML] All done :)')
def convert_data(self, x: pd.DataFrame, preprocess: bool = True) -> [pd.DataFrame, pd.Series]:
"""
Function that uses the same process as the pipeline to clean data.
Useful if pipeline is pickled for production
Parameters
----------
x [pd.DataFrame]: Input features
preprocess [bool]: Whether to apply the custom preprocess function
"""
# Convert to Pandas
if isinstance(x, np.ndarray):
x = pd.DataFrame(x, columns=[f"Feature_{i}" for i in range(x.shape[1])])
# Custom code
if self.preprocessFunction is not None and preprocess:
ex_globals = {'data': x}
exec(self.preprocessFunction, ex_globals)
x = ex_globals['data']
# Process data
x = self.dataProcessor.transform(x)
# Drift Check
self.driftDetector.check(x)
# Split output
y = None
if self.target in x.keys():
y = x[self.target]
if not self.includeOutput:
x = x.drop(self.target, axis=1)
# Sequence
if self.sequence:
x, y = self.dataSequencer.convert(x, y)
# Convert Features
x = self.featureProcessor.transform(x, self.settings['feature_set'])
# Standardize
if self.standardize:
x, y = self._transform_standardize(x, y)
# NaN test -- datetime should be taken care of by now
if x.astype('float32').replace([np.inf, -np.inf], np.nan).isna().sum().sum() != 0:
raise ValueError(f"Column(s) with NaN: {list(x.keys()[x.isna().sum() > 0])}")
# Return
return x, y
def predict(self, data: pd.DataFrame) -> np.ndarray:
"""
Full script to make predictions. Uses 'Production' folder with defined or latest version.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert
x, y = self.convert_data(data)
# Predict
if self.mode == 'regression' and self.standardize:
predictions = self._inverse_standardize(self.bestModel.predict(x))
else:
predictions = self.bestModel.predict(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return predictions
def predict_proba(self, data: pd.DataFrame) -> np.ndarray:
"""
Returns probabilistic prediction, only for classification.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
assert self.mode == 'classification', 'Predict_proba only available for classification'
assert hasattr(self.bestModel, 'predict_proba'), '{} has no attribute predict_proba'.format(
type(self.bestModel).__name__)
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert data
x, y = self.convert_data(data)
# Predict
prediction = self.bestModel.predict_proba(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return prediction
# Fit functions
def _read_data(self, x=None, y=None, *, data=None, **kwargs):
"""
Reads and loads data into desired format.
Expects to receive:
1. Both, ``x`` and ``y`` (-> features and target), or
2. Either ``x`` or ``data`` (-> dataframe or path to folder)
Parameters
----------
x : np.ndarray or pd.Series or pd.DataFrame or str or Path, optional
x-data (input) OR acts as ``data`` parameter when param ``y`` is empty
y : np.ndarray or pd.Series, optional
y-data (target)
data : pd.DataFrame or str or Path, optional
Contains both, x and y, OR provides a path to folder structure
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
Returns
-------
Pipeline
"""
assert x is not None or data is not None, 'No data provided'
assert (x is not None) ^ (data is not None), 'Setting both, `x` and `data`, is ambiguous'
# Labels are provided separately
if y is not None:
# Check data
x = x if x is not None else data
assert x is not None, 'Parameter ``x`` is not set'
assert isinstance(x, (np.ndarray, pd.Series, pd.DataFrame)), 'Unsupported data type for parameter ``x``'
assert isinstance(y, (np.ndarray, pd.Series)), 'Unsupported data type for parameter ``y``'
# Set target manually if not defined
if self.target == '':
self.target = 'target'
# Parse x-data
if isinstance(x, np.ndarray):
x = pd.DataFrame(x)
elif isinstance(x, pd.Series):
x = pd.DataFrame(x)
# Parse y-data
if isinstance(y, np.ndarray):
y =
|
pd.Series(y, index=x.index)
|
pandas.Series
|
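The Pipeline class above is configured entirely through keyword arguments described in its __init__ docstring, and fit() chains data preparation, model training, and production filing. The call below is a hypothetical usage sketch: the CSV path, target column, and the package-level import are assumptions made for illustration, not taken from the source.

import pandas as pd
from Amplo import Pipeline  # assumed import path for the class shown above

df = pd.read_csv("machine_logs.csv")           # placeholder dataset
pipeline = Pipeline(
    target="failure",                          # dependent variable column
    mode="classification",
    objective="neg_log_loss",                  # any key in sklearn.metrics.SCORERS
    grid_search_type="optuna",
    stacking=False,
)
pipeline.fit(df)                               # prep data, train, write production files
proba = pipeline.predict_proba(df.drop(columns=["failure"]))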
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def get_ml_op(
start_date : str,
pre_processed_dataset : Input[Dataset],
bros_dataset : Input[Dataset],
dic_model_dataset : Output[Dataset],
dic_df_pred_dataset : Output[Dataset],
prediction_result_dataset : Output[Dataset]
) -> str :
DESC = "model m19-12 / Classifier / change_p1_over1 / no bros / Incl top30 / Incl. KODEX ETN / +-25% Training / +-25% Prediction / mkt cap >= KRW 50B and <= KRW 5T"
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from catboost import Pool
from catboost import CatBoostClassifier, CatBoostRegressor
# Load Dataset
df_preP = pd.read_pickle(pre_processed_dataset.path)
df_bros = pd.read_pickle(bros_dataset.path)
# Dates things ...
l_dates = df_preP.date.unique().tolist()
print(f'df_preP start from {l_dates[0]} end at {l_dates[-1]} shape : {df_preP.shape}')
idx_start = l_dates.index(start_date)
print(f'index of start date : {idx_start}')
period = int(l_dates.__len__() - idx_start)
# get Univ df
def get_15pct_univ_in_period(df, l_dates): # input dataframe : top30s in the period
print(f'length of l_date : {l_dates.__len__()}')
df_univ = pd.DataFrame()
for date in l_dates :
df_of_the_day = df[df.date == date]
df_of_the_day = df_of_the_day[(df_of_the_day.mkt_cap > 500) & (df_of_the_day.mkt_cap < 50000)]
df_15pct_of_the_day = df_of_the_day[(df_of_the_day.change >= -0.25) & (df_of_the_day.change <= 0.25)]
# l_codes = df_15pct_of_the_day.code.unique().tolist()
# df_bros_in_date = df_bros[df_bros.date == date]
# l_bros_of_top30s = df_bros_in_date[\
# df_bros_in_date.source.isin(l_codes)].target.unique().tolist()
# df_bros_of_top30 = df_of_the_day[df_of_the_day.code.isin(l_bros_of_top30s)]
df_ = df_15pct_of_the_day #.append(df_bros_of_top30)
df_.drop_duplicates(subset=['code', 'date'], inplace=True)
df_univ = df_univ.append(df_)
return df_univ
# Set Target and Feats
# target_col = ['target_close_over_10']
target_col = ['change_p1_over1']
cols_indicator = [ 'code', 'name', 'date', ]
features = [
# 'code',
# 'name',
# 'date',
# 'rank',
'mkt_cap',
# 'mkt_cap_cat',
'in_top30',
# 'rank_mean_10',
# 'rank_mean_5',
'in_top_30_5',
'in_top_30_10',
'in_top_30_20',
# 'up_bro_ratio_20',
# 'up_bro_ratio_40',
# 'up_bro_ratio_60',
# 'up_bro_ratio_90',
# 'up_bro_ratio_120',
# 'n_bro_20',
# 'n_bro_40',
# 'n_bro_60',
# 'n_bro_90',
# 'n_bro_120',
# 'all_bro_rtrn_mean_20',
# 'all_bro_rtrn_mean_40',
# 'all_bro_rtrn_mean_60',
# 'all_bro_rtrn_mean_90',
# 'all_bro_rtrn_mean_120',
# 'up_bro_rtrn_mean_20',
# 'up_bro_rtrn_mean_40',
# 'up_bro_rtrn_mean_60',
# 'up_bro_rtrn_mean_90',
# 'up_bro_rtrn_mean_120',
# 'all_bro_rtrn_mean_ystd_20',
# 'all_bro_rtrn_mean_ystd_40',
# 'all_bro_rtrn_mean_ystd_60',
# 'all_bro_rtrn_mean_ystd_90',
# 'all_bro_rtrn_mean_ystd_120',
# 'bro_up_ratio_ystd_20',
# 'bro_up_ratio_ystd_40',
# 'bro_up_ratio_ystd_60',
# 'bro_up_ratio_ystd_90',
# 'bro_up_ratio_ystd_120',
# 'up_bro_rtrn_mean_ystd_20',
# 'up_bro_rtrn_mean_ystd_40',
# 'up_bro_rtrn_mean_ystd_60',
# 'up_bro_rtrn_mean_ystd_90',
# 'up_bro_rtrn_mean_ystd_120',
# 'index',
# 'open_x',
# 'high_x',
# 'low_x',
# 'close_x',
# 'volume_x',
# 'change_x',
# 'high_p1',
# 'high_p2',
# 'high_p3',
# 'close_p1',
# 'close_p2',
# 'close_p3',
# 'change_p1',
# 'change_p2',
# 'change_p3',
# 'change_p1_over5',
# 'change_p2_over5',
# 'change_p3_over5',
# 'change_p1_over10',
# 'change_p2_over10',
# 'change_p3_over10',
# 'close_high_1',
# 'close_high_2',
# 'close_high_3',
# 'close_high_1_over10',
# 'close_high_2_over10',
# 'close_high_3_over10',
# 'close_high_1_over5',
# 'close_high_2_over5',
# 'close_high_3_over5',
# 'open_y',
# 'high_y',
# 'low_y',
# 'close_y',
# 'volume_y',
# 'change_y',
# 'macd',
# 'boll_ub',
# 'boll_lb',
# 'rsi_30',
# 'dx_30',
# 'close_30_sma',
# 'close_60_sma',
'daily_return',
'return_lag_1',
'return_lag_2',
'return_lag_3',
'bb_u_ratio',
'bb_l_ratio',
# 'max_scale_MACD',
'volume_change_wrt_10max',
'volume_change_wrt_5max',
'volume_change_wrt_20max',
'volume_change_wrt_10mean',
'volume_change_wrt_5mean',
'volume_change_wrt_20mean',
'close_ratio_wrt_10max',
'close_ratio_wrt_10min',
'oh_ratio',
'oc_ratio',
'ol_ratio',
'ch_ratio',
# 'Symbol',
# 'DesignationDate',
# 'admin_stock',
# 'dayofweek'
]
# Model training and prediction
df_pred_all = pd.DataFrame()
dic_model = {}
dic_pred = {}
for i in range(idx_start, l_dates.__len__()):
dates_for_train = l_dates[i-23: i-3]  # how far back to look: a 20-date window, for now
date_ref = l_dates[i]
print(f'train date : from {dates_for_train[0]} to {dates_for_train[-1]}')
print(f'prediction date : {date_ref}')
df_train = get_15pct_univ_in_period(df_preP, dates_for_train)
df_train = df_train.dropna(axis=0, subset=target_col)
# Prediction Dataset Concept used by mistake
df_pred = df_preP[df_preP.date == date_ref]
df_pred = df_pred[(df_pred.change >= -0.25) & (df_pred.change <= 0.25)] #get_15pct_univ_in_period(df_preP, [date_ref])
df_pred = df_pred[(df_pred.mkt_cap > 500) & (df_pred.mkt_cap < 50000)]
# df_pred['date'] = date_ref
print(f'shape of df_pred : {df_pred.shape}')
dic_pred[f'{date_ref}'] = df_pred[features]  # keep the prediction frame for each date
# ML Model
model = CatBoostClassifier(
iterations=1000,
train_dir = '/tmp',
# verbose=500,
silent=True
)
X = df_train[features + cols_indicator ]
y = df_train[target_col].astype('float')
# Run prediction 3 times
df_pred_the_day = pd.DataFrame()
for iter_n in range(3):
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train = X_train[features]
X_test = X_test[features]
eval_dataset = Pool(
X_test, y_test,
# cat_features=['mkt_cap_cat']
cat_features=['in_top30']
)
print('X Train Size : ', X_train.shape, 'Y Train Size : ', y_train.shape)
model.fit(X_train, y_train,
use_best_model=True,
eval_set = eval_dataset,
# cat_features=['in_top30','dayofweek', 'mkt_cap_cat']
cat_features=['in_top30']
)
dic_model[f'{date_ref}_{iter_n}'] = model
print(model.get_best_iteration())
# Prediction
pred_result = model.predict(df_pred[features])
pred_proba = model.predict_proba(df_pred[features])
df_pred_result =
|
pd.DataFrame(pred_result, columns=['Prediction'])
|
pandas.DataFrame
|
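get_ml_op above retrains its CatBoost classifier in a walk-forward loop: for every trading date from idx_start onward it trains on the 20 dates that end three days earlier and predicts the reference date itself. The snippet below isolates that windowing arithmetic with a made-up calendar so the slice bounds are easy to verify; the dates are placeholders.

# Illustrative calendar; the real l_dates comes from df_preP.date.unique().
l_dates = [f"2021-01-{d:02d}" for d in range(1, 31)]
idx_start = 25

for i in range(idx_start, len(l_dates)):
    dates_for_train = l_dates[i - 23: i - 3]   # 20-date training window
    date_ref = l_dates[i]                      # prediction date
    print(f"train {dates_for_train[0]}..{dates_for_train[-1]} -> predict {date_ref}")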
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import collections
import urllib.parse
import pkg_resources
import itertools
import tempfile
import subprocess
import skbio
import skbio.diversity
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.sandbox.stats.multicomp import multipletests
import qiime2
import q2templates
from natsort import natsorted
from patsy import ModelDesc
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta')
def bioenv(output_dir: str, distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.Metadata) -> None:
# Filter metadata to only include IDs present in the distance matrix.
# Also ensures every distance matrix ID is present in the metadata.
metadata = metadata.filter_ids(distance_matrix.ids)
# drop non-numeric columns and empty columns
pre_filtered_cols = set(metadata.columns)
metadata = metadata.filter_columns(column_type='numeric')
non_numeric_cols = pre_filtered_cols - set(metadata.columns)
# filter 0 variance numerical columns and empty columns
pre_filtered_cols = set(metadata.columns)
metadata = metadata.filter_columns(drop_zero_variance=True,
drop_all_missing=True)
zero_variance_cols = pre_filtered_cols - set(metadata.columns)
# Drop samples that have any missing values.
# TODO use Metadata API if this type of filtering is supported in the
# future.
df = metadata.to_dataframe()
df = df.dropna(axis='index', how='any')
# filter the distance matrix to exclude samples that were dropped from
# the metadata, and keep track of how many samples survived the filtering
# so that information can be presented to the user.
initial_dm_length = distance_matrix.shape[0]
distance_matrix = distance_matrix.filter(df.index)
filtered_dm_length = distance_matrix.shape[0]
result = skbio.stats.distance.bioenv(distance_matrix, df)
result = q2templates.df_to_html(result)
index = os.path.join(TEMPLATES, 'bioenv_assets', 'index.html')
q2templates.render(index, output_dir, context={
'initial_dm_length': initial_dm_length,
'filtered_dm_length': filtered_dm_length,
'non_numeric_cols': ', '.join(sorted(non_numeric_cols)),
'zero_variance_cols': ', '.join(sorted(zero_variance_cols)),
'result': result})
_beta_group_significance_fns = {'permanova': skbio.stats.distance.permanova,
'anosim': skbio.stats.distance.anosim,
'permdisp': skbio.stats.distance.permdisp}
def _get_distance_boxplot_data(distance_matrix, group_id, groupings):
x_ticklabels = []
all_group_distances = []
# extract the within group distances
within_group_distances = []
pairs_summary = []
group = groupings[group_id]
for i, sid1 in enumerate(group):
for sid2 in group[:i]:
dist = distance_matrix[sid1, sid2]
within_group_distances.append(dist)
pairs_summary.append((sid1, sid2, group_id, group_id, dist))
x_ticklabels.append('%s (n=%d)' %
(group_id, len(within_group_distances)))
all_group_distances.append(within_group_distances)
# extract between group distances for group to each other group
for other_group_id, other_group in groupings.items():
between_group_distances = []
if group_id == other_group_id:
continue
for sid1 in group:
for sid2 in other_group:
dist = distance_matrix[sid1, sid2]
between_group_distances.append(dist)
pairs_summary.append(
(sid1, sid2, group_id, other_group_id, dist))
x_ticklabels.append('%s (n=%d)' %
(other_group_id, len(between_group_distances)))
all_group_distances.append(between_group_distances)
return all_group_distances, x_ticklabels, pairs_summary
def _get_pairwise_group_significance_stats(
distance_matrix, group1_id, group2_id, groupings, metadata,
beta_group_significance_fn, permutations):
group1_group2_samples = groupings[group1_id] + groupings[group2_id]
metadata = metadata[group1_group2_samples]
distance_matrix = distance_matrix.filter(group1_group2_samples)
return beta_group_significance_fn(distance_matrix, metadata,
permutations=permutations)
def beta_group_significance(output_dir: str,
distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.CategoricalMetadataColumn,
method: str = 'permanova',
pairwise: bool = False,
permutations: int = 999) -> None:
try:
beta_group_significance_fn = _beta_group_significance_fns[method]
except KeyError:
raise ValueError('Unknown group significance method %s. The available '
'options are %s.' %
(method,
', '.join(_beta_group_significance_fns)))
# Filter metadata to only include IDs present in the distance matrix.
# Also ensures every distance matrix ID is present in the metadata.
metadata = metadata.filter_ids(distance_matrix.ids)
metadata = metadata.drop_missing_values()
# filter the distance matrix to exclude samples that were dropped from
# the metadata due to missing values, and keep track of how many samples
# survived the filtering so that information can be presented to the user.
initial_dm_length = distance_matrix.shape[0]
distance_matrix = distance_matrix.filter(metadata.ids)
filtered_dm_length = distance_matrix.shape[0]
metadata = metadata.to_series()
# Run the significance test
result = beta_group_significance_fn(distance_matrix, metadata,
permutations=permutations)
# Generate distance boxplots
sns.set_style('white')
# Identify the groups, then compute the within group distances and the
# between group distances, and generate one boxplot per group.
# groups will be an OrderedDict mapping group id to the sample ids in that
# group. The order is used both on the x-axis, and in the layout of the
# boxplots in the visualization.
# TODO: update to use a grouping API and natsort API on
# CategoricalMetadataColumn, if those become available.
groupings = collections.OrderedDict(
[(id, list(series.index))
for id, series in natsorted(metadata.groupby(metadata))])
pairs_summary = pd.DataFrame(columns=['SubjectID1', 'SubjectID2', 'Group1',
'Group2', 'Distance'])
for group_id in groupings:
group_distances, x_ticklabels, group_pairs_summary = \
_get_distance_boxplot_data(distance_matrix, group_id, groupings)
group_pairs_summary = pd.DataFrame(
group_pairs_summary, columns=['SubjectID1', 'SubjectID2',
'Group1', 'Group2', 'Distance'])
pairs_summary =
|
pd.concat([pairs_summary, group_pairs_summary])
|
pandas.concat
|
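_get_distance_boxplot_data above splits pairwise distances into within-group and between-group lists before plotting. The toy example below walks the same bookkeeping on a four-sample distance matrix; it assumes scikit-bio is installed, and the sample IDs, grouping, and distances are invented.

import skbio

dm = skbio.DistanceMatrix(
    [[0.0, 0.2, 0.7, 0.8],
     [0.2, 0.0, 0.6, 0.9],
     [0.7, 0.6, 0.0, 0.3],
     [0.8, 0.9, 0.3, 0.0]],
    ids=['s1', 's2', 's3', 's4'])
groupings = {'A': ['s1', 's2'], 'B': ['s3', 's4']}

# Within-group: every unordered pair inside group A.
within_a = [dm[x, y] for i, x in enumerate(groupings['A'])
            for y in groupings['A'][:i]]
# Between-group: every pair with one sample from A and one from B.
between_ab = [dm[x, y] for x in groupings['A'] for y in groupings['B']]
print(within_a, between_ab)   # [0.2] and [0.7, 0.8, 0.6, 0.9]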
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
|
tm.assert_index_equal(indexed_frame.index, index)
|
pandas.util.testing.assert_index_equal
|
import pandas as pd
import assetallocation_arp.models.ARP as arp
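# Reads futures return data from the "Data" sheet of <file>.xlsx and derives EUR cross-rate
# returns for Sterling, Skrona, Nokrona and SwissFranc from the individual currency columns
# and the Euro column.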
def dataimport_future(file):
data=pd.read_excel(file+".xlsx",sheet_name="Data", index_col=[0], header=3,skiprows=[4,5,6,7])
data.index = pd.to_datetime(data.index, format='%d.%m.%Y %H:%M:%S')
data["SterlingEUR"]=(1+data["Sterling"])/(1+data["Euro"])-1
data["SkronaEUR"]=(1+data["Skrona"])/(1+data["Euro"])-1
data["NokronaEUR"]=(1+data["Nokrona"])/(1+data["Euro"])-1
data["SwissFrancEUR"]=(1+data["SwissFranc"])/(1+data["Euro"])-1
return data
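# Reads index data spread over the "Data1"-"Data3" sheets (keeping every fourth column) and
# concatenates them column-wise into a single frame.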
def dataimport_index (file):
data=pd.read_excel(file+".xlsx",sheet_name="Data1", index_col=None, header=1,skiprows=[2,3,4],usecols=[0]+list(range(1,60,4)))
data2=pd.read_excel(file+".xlsx",sheet_name="Data2", index_col=None, header=1,skiprows=[2,3,4],usecols=list(range(0,64,4)))
data3=pd.read_excel(file+".xlsx",sheet_name="Data3", index_col=None, header=1,skiprows=[2,3,4],usecols=list(range(0,60,4)))
data=pd.concat([data, data2, data3], axis=1)
data.index =
|
pd.to_datetime(data['Index'], format='%Y-%m-%d')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""Survival analysis module.
Uses `scikit-survival <https://github.com/sebp/scikit-survival>`_, which can
be installed with ``pip install scikit-survival`` but is imported with
``import sksurv``.
"""
import logging
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, KFold
from sksurv.linear_model import CoxnetSurvivalAnalysis
from sksurv.metrics import concordance_index_censored
from tqdm import tqdm
from pathway_forte.constants import (
CANCER_DATA_SETS, CLINICAL_DATA, NORMAL_EXPRESSION_SAMPLES, PATHWAY_RESOURCES, RESULTS,
)
from pathway_forte.utils import get_num_samples
logger = logging.getLogger(__name__)
def prepare_ssgsea_data_for_survival_analysis(
enrichment_score_path: str,
clinical_data_path: str,
normal_sample_size: int,
):
"""Prepare data for input into survival analysis.
:param enrichment_score_path: path to the ssGSEA normalized enrichment scores file
:param clinical_data_path: path to the clinical data file with survival status and time to death if death occurred
:param normal_sample_size: number of normal samples
:return: dataFrame of pathway scores for each sample, array of survival status and time to death info
"""
# Read csv files
clinical_data_df = pd.read_csv(clinical_data_path, sep='\t')
enrichment_score =
|
pd.read_csv(enrichment_score_path, sep='\t', header=0)
|
pandas.read_csv
|
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, mean_absolute_percentage_error, roc_curve
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
import statsmodels.api as sm
import util.explore as explore_util
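# Loads a pickle of dimensionality-reduced text embeddings and returns a DataFrame with one row
# per project_id and the 2-D t-SNE / UMAP coordinates as columns prefixed with `prefix`.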
def load_embeds(pickle_path, prefix):
with open(f"../data/transformed_data/{pickle_path}", "rb") as fin:
embeddings = pickle.load(fin)
em_ext = {
"project_id": embeddings["project_ids"],
f"{prefix}_embed_x_tsne": embeddings[f"tsne"][:, 0],
f"{prefix}_embed_y_tsne": embeddings[f"tsne"][:, 1],
f"{prefix}_embed_x_umap": embeddings[f"umap_from_full"][:, 0],
f"{prefix}_embed_y_umap": embeddings[f"umap_from_full"][:, 1],
f"{prefix}_embed_x_umap2": embeddings[f"umap_from_pca"][:, 0],
f"{prefix}_embed_y_umap2": embeddings[f"umap_from_pca"][:, 1]
}
embed_df = pd.DataFrame(em_ext)
return embed_df
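# Collapses the free-text project financial type into coarse buckets: IDA/grants combined with
# IBRD become "BLEND", pure IDA or grant financing becomes "IDA", IBRD-only becomes "IBRD",
# non-string values map to "UNKNOWN" and anything else to "OTHER".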
def aggregate_proj_fin_type(projectfinancialtype):
if type(projectfinancialtype) != str:
return "UNKNOWN"
ptype = projectfinancialtype.upper()
if "IDA" in ptype and "IBRD" in ptype:
return "BLEND"
elif "GRANTS" in ptype and "IBRD" in ptype:
return "BLEND"
elif "IDA" in ptype or "GRANTS" in ptype:
return "IDA"
elif "IBRD" in ptype:
return "IBRD"
else:
return "OTHER"
def load_projects_with_embeddings(
ipf_feature_cols=[],
during_project_features=[],
reassemble_proj_country_df=False
):
df = pd.read_json("../data/aggregated_proj.json", orient="index")
country_panel = pd.read_csv('../data/countrypanel.csv')
if reassemble_proj_country_df:
df['boardapprovaldate'] = pd.to_datetime(df['boardapprovaldate'])
df['closingdate'] = pd.to_datetime(df['closingdate'])
df['closingyear'] = df.closingdate.dt.year
new_country_df = pd.read_csv('../data/tmp/project_countries.csv')
proj_df = df.merge(new_country_df[['project_country', 'panel_country']],
left_on='countryname', right_on='project_country', how='left')
proj_df = proj_df.drop(columns=['countryname'])
proj_df = proj_df[proj_df.panel_country.notna()]
practice_count = pd.read_csv("../data/transformed_data/WB_project_practices.zip", compression='zip')
proj_df = proj_df.merge(practice_count[['proj_id', 'practice_type_code', 'gp_percentage', 'n_practices']], left_on='id', right_on='proj_id', how='left')
proj_df = proj_df.drop(columns=['proj_id'])
save_transformed_df = True
if save_transformed_df:
proj_df.to_csv('../data/transformed_data/projects_with_ccodes.csv')
else:
proj_df = pd.read_csv('../data/transformed_data/projects_with_ccodes.csv', index_col=0, low_memory=False)
proj_df['boardapprovaldate'] = pd.to_datetime(proj_df['boardapprovaldate'])
proj_df['closingdate'] = pd.to_datetime(proj_df['closingdate'])
aux_proj_data = pd.read_csv('../data/transformed_data/aux_project_data.zip', compression='zip')
pdo_embed_df = load_embeds("title_pdo_embeds_reduced.pkl", "pdo")
dli_embed_df = load_embeds("dli_embeddings_reduced.pkl", "dli")
embed_cols = [col for col in list(pdo_embed_df.columns) + list(dli_embed_df.columns) if col != "project_id"]
sector_df = pd.read_csv('../data/transformed_data/WB_project_sectors.zip', compression='zip').rename(columns={ 'proj_id': 'id' })
main_sector_df = sector_df[sector_df.flag_main_sector == 1]
sector_df['sq_percent'] = sector_df['sector_percentage'] ** 2
hhi_df = sector_df.groupby('id', as_index=False).agg(hhi=('sq_percent', 'sum'))
hhi_df['hhi_clipped'] = hhi_df['hhi'].clip(upper=(100 * 100))
def assemble_df(proj_feature_cols):
ndf = proj_df.merge(aux_proj_data[['projid'] + proj_feature_cols], left_on='id', right_on='projid', how='left')
ndf = ndf.merge(pdo_embed_df, left_on="id", right_on="project_id", how="left")
ndf = ndf.merge(dli_embed_df, left_on="id", right_on="project_id", how="left")
ndf[embed_cols] = ndf[embed_cols].fillna(0)
ndf = ndf.merge(main_sector_df[['id', 'sector_code', 'sector_percentage', 'parent_sector_name']], how='left')
ndf = ndf.merge(hhi_df, how='left')
ndf["financing_type"] = ndf.projectfinancialtype.apply(aggregate_proj_fin_type)
ndf["financing_instr"] = ndf.lendinginstr.replace({
"Sector Investment and Maintenance Loan": "Specific Investment Loan",
0: "UNIDENTIFIED"
})
narrow_sector_features = ['sector1', 'sector2', 'sector3', 'sector4', 'sector5']
sector_count_df = ndf[['id'] + narrow_sector_features]
sector_count_df[narrow_sector_features] = sector_count_df[narrow_sector_features].notna()
sector_count_df['number_sectors'] = sector_count_df[narrow_sector_features].sum(axis=1)
ndf = ndf.merge(sector_count_df[['id', 'number_sectors']])
ndf["focused_project"] = (ndf.sector_percentage > 90) | (ndf.number_sectors == 1)
ndf["scattered_project"] = (~ndf.focused_project) & ((ndf.sector_percentage < 60) | (ndf.number_sectors > 2))
return ndf
approv_df = assemble_df(ipf_feature_cols)
review_df = assemble_df(during_project_features)
return country_panel, approv_df, review_df
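# Joins the selected project features onto the country panel on (country, closing year), flags
# health and education projects from the sector/theme keywords and adds the PDO text length.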
def assemble_input_df(ndf, relevant_feature_cols, country_panel,
sector_features=['sector1', 'sector2', 'sector3', 'sector4', 'sector5', 'theme1', 'theme2']):
wdf = ndf[relevant_feature_cols].fillna(0)
wdf['all_sectors_theme_words'] = wdf[sector_features].apply(lambda row: ' '.join(row.values.astype(str)), axis=1).str.lower()
wdf['is_health_project'] = wdf.all_sectors_theme_words.str.contains('health')
wdf['is_education_project'] = wdf.all_sectors_theme_words.str.contains('edu')
data = wdf.merge(country_panel.drop(columns=['regionname']),
left_on=['panel_country', 'closingyear'], right_on=['countryname', 'year'])
data = data.drop(columns=['countryname', 'year'])
data = data[data.closingyear.notna()]
data['pdo_length'] = data['pdo'].str.len().fillna(0)
data = data.rename(columns = { 'project_country': 'country', 'closingyear': 'year' })
return data
# slightly redundant to do each time, but different features may have different missingness
def construct_residual_df(data, project_feature_cols):
health_target = 'mortality_under5_lag-5'
health_to_lag = {
'mortality_under5': -5,
'hiv_prevalence': -5,
'conflict': -5
}
health_observed_X_cols = [
'gdp_pc_ppp',
'fertility',
'population',
'physicians_rate',
'female_adult_literacy',
'access_water',
'access_sanitation',
'hiv_prevalence_lag-5'
]
edu_target = 'edu_aner_lag-5'
edu_to_lag = {
'edu_aner': -5,
'edu_share_gov_exp': -5,
'edu_pupil_teacher': -5,
'young_population': -5,
'cash_surplus_deficit': -5,
'inflation': -5,
'trade_share_gdp': -5,
'freedom_house': -5
}
edu_observed_X_cols = [f"{obs_col}_lag-5" for obs_col in edu_to_lag.keys() if obs_col != "edu_aner"]
health_results = end_to_end_project_eval(
data, "health", "mortality_under5_lag-5", health_to_lag,
observed_X_cols=health_observed_X_cols,
loan_feature_cols=project_feature_cols,
inverted_outcome=True
)
edu_results = end_to_end_project_eval(
data, "edu", "edu_aner_lag-5", edu_to_lag,
observed_X_cols=edu_observed_X_cols,
loan_feature_cols=project_feature_cols,
inverted_outcome=True
)
consolidated_df = pd.concat((health_results["residual_df"], edu_results["residual_df"]))
consolidated_df = consolidated_df.fillna(0)
return consolidated_df, health_results, edu_results
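# One-hot encoding splits each categorical feature into several columns; the two helpers below
# map the fitted importances back to the original feature names by summing over expanded columns
# that share the original name as a prefix.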
def sum_feature_imp(category_name, feature_imp, exp_features):
return sum([feature_imp[col] for col in exp_features if col.startswith(category_name)])
def extract_feature_imp(est, orig_features, exp_features):
feature_imp = { col: est.feature_importances_[i] for i, col in enumerate(exp_features) }
summed_feature_imp = { col: sum_feature_imp(col, feature_imp, exp_features) for col in orig_features }
return feature_imp, summed_feature_imp
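# Fits the estimator on a 90/10 train/test split (stratified for classification) and returns it
# with a dict of scores: ROC-AUC / precision-recall-F1 for classifiers, MAPE and R^2 for
# regressors (on both train and test data where computed).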
def fit_score_model(X, y, est, classification=False, random_seed=None):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, stratify=y if classification else None, random_state=random_seed)
print("Size of X train: ", len(X_train), " and X test: ", len(X_test), " and width: ", len(X_train.columns))
est.fit(X_train, y_train)
scores = { 'default_score': est.score(X_test, y_test) }
if classification:
true_pred = est.predict_proba(X_test)[:, 1]
scores['fscore_etc'] = precision_recall_fscore_support(y_test, est.predict(X_test), average="binary")
scores['roc_auc'] = roc_auc_score(y_test, true_pred)
scores['roc_curve'] = roc_curve(y_test, true_pred)
scores['roc_auc_train'] = roc_auc_score(y_train, est.predict_proba(X_train)[:, 1])
else:
scores['mape'] = mean_absolute_percentage_error(y_test, est.predict(X_test))
scores['mape_train'] = mean_absolute_percentage_error(y_train, est.predict(X_train))
scores['r2_train'] = est.score(X_train, y_train)
test_data = { "X_test": X_test, "y_test": y_test }
return est, scores
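# Two-stage evaluation: (1) lag and scale the observed country-level covariates and partial them
# out of the outcome with OLS, keeping the residual; (2) fit a random forest regressor and
# classifier on the scaled, one-hot encoded project features to explain that residual, and
# return the fitted models, scores and summed feature importances.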
def end_to_end_project_eval(all_data, sector_key_word, target_col, variables_to_lag, observed_X_cols, loan_feature_cols,
regressor=RandomForestRegressor, classifier=RandomForestClassifier, inverted_outcome=False):
sector_data = all_data.copy()
for var in variables_to_lag:
sector_data = explore_util.lag_variable_simple(sector_data, var, variables_to_lag[var])
sector_data['is_sector_project'] = sector_data.all_sectors_theme_words.str.contains(sector_key_word)
sector_data = sector_data[sector_data.is_sector_project]
print("Sector projects data: ", len(sector_data), " versus all projects: ", len(all_data))
sdata = sector_data[['id'] + observed_X_cols + [target_col]]
sdata = sdata.dropna()
print("Clean observations: ", len(sdata))
# print("Pre scaling: ", sdata[observed_X_cols[:2]].describe())
observation_scaler = StandardScaler()
sdata[observed_X_cols] = observation_scaler.fit_transform(sdata[observed_X_cols])
# print("Shape of endog: ", sdata[target_col].shape, " and exog: ", sm.add_constant(sdata[observed_X_cols]).shape)
res_est = sm.OLS(endog=sdata[target_col], exog=sm.add_constant(sdata[observed_X_cols])).fit()
print("Naive R squared of partialling out phase: ", res_est.rsquared, " and f_p: ", res_est.f_pvalue)
# print("Post scaling: ", sdata[observed_X_cols[:2]].describe())
target_resid = "residual_target"
sdata[target_resid] = res_est.resid
forest_data = sdata[['id', target_resid]].merge(all_data[['id'] + loan_feature_cols], how='inner')
# print(forest_data.isna().sum())
pre_scale_target_desc = forest_data[target_resid].describe()
# print("Descriptive stats for target: ", pre_scale_target_desc)
numeric_cols = forest_data.select_dtypes(include=np.number).columns.tolist()
treatment_scaler = StandardScaler()
forest_data[numeric_cols] = treatment_scaler.fit_transform(forest_data[numeric_cols])
categorical_cols = [col for col in loan_feature_cols if col not in numeric_cols]
forest_data = pd.get_dummies(forest_data, columns=categorical_cols)
forest_data = forest_data.dropna()
print("Clean within project characteristics: ", len(forest_data))
pos_std_dev_threshold = 0.1
forest_data[f'{target_resid}_above_threshold'] = (
forest_data[target_resid] > pos_std_dev_threshold if not inverted_outcome else
forest_data[target_resid] < pos_std_dev_threshold
)
print("Projects with residual above mean: ", len(forest_data[forest_data[target_resid] > 0]))
print("Projects with positive residual above threshold: ", len(forest_data[forest_data[target_resid] > pos_std_dev_threshold]))
nreg = regressor()
nest = classifier()
X = forest_data.drop(columns=['id', target_resid, f'{target_resid}_above_threshold'])
y_reg = forest_data[target_resid]
y_class = forest_data[f'{target_resid}_above_threshold']
reg_fit, reg_scores = fit_score_model(X, y_reg, nreg)
bin_est, bin_scores = fit_score_model(X, y_class, nest, classification=True)
all_col_imp, summed_imp = extract_feature_imp(bin_est, loan_feature_cols, X.columns)
summed_imp = sort_imp(summed_imp)
return {
"partial_out_model": res_est,
"residual_regressor": reg_fit,
"residual_classifier": bin_est,
"regression_scores": reg_scores,
"classifier_scores": bin_scores,
"pre_scale_target_stats": pre_scale_target_desc,
"summed_importances": summed_imp,
"all_importances": all_col_imp,
"residual_df": forest_data
}
def sort_imp(summed_imp):
return { feature: score for feature, score in sorted(summed_imp.items(), key=lambda item: item[1], reverse=False )}
def drop_agg_cols(X, columns_to_drop):
split_cols_to_drop = []
for agg_col in columns_to_drop:
split_cols_to_drop += [col for col in X.columns if agg_col in col]
return X.drop(columns=split_cols_to_drop)
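# Re-runs the residual regression/classification on the pooled residual table, optionally
# dropping whole feature groups first, and returns the scores, summed importances and models.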
def run_residual_reg(consolidated_df, probe_feature_cols, columns_to_drop=[], reg=RandomForestRegressor(), clf=RandomForestClassifier(), random_seed=None):
X = consolidated_df.drop(columns=['id', "residual_target", f'residual_target_above_threshold'])
if len(columns_to_drop) > 0:
split_cols_to_drop = []
for agg_col in columns_to_drop:
split_cols_to_drop += [col for col in X.columns if agg_col in col]
X = X.drop(columns=split_cols_to_drop)
y_reg = consolidated_df["residual_target"]
y_class = consolidated_df['residual_target_above_threshold']
reg_fit, reg_scores = fit_score_model(X, y_reg, reg, random_seed=random_seed)
bin_est, bin_scores = fit_score_model(X, y_class, clf, classification=True, random_seed=random_seed)
all_col_imp, summed_imp = extract_feature_imp(bin_est, probe_feature_cols, X.columns)
summed_imp = { feature: score for feature, score in sorted(summed_imp.items(), key=lambda item: item[1], reverse=True)}
models = [reg_fit, bin_est]
return bin_scores, reg_scores, summed_imp, models
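# Drop-one ablation: for each feature group, refit the residual models without it and record the
# resulting ROC-AUC / train R^2 together with the penalty relative to the reference scores.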
def conduct_drop_one(data=None, clf=None, all_feature_cols=None, feature_imp=None,
cols_to_ablate=None, cols_to_exclude=None, ref_bin_scores=None, ref_reg_scores=None, random_seed=None):
by_col_ablation = []
print("Initiating drop one tests, for columns: ", cols_to_ablate)
for col in cols_to_ablate:
print(".", end="")
cols_to_drop = [col] + cols_to_exclude if type(col) == str else col + cols_to_exclude
ablation_results = run_residual_reg(data, all_feature_cols, columns_to_drop=cols_to_drop, clf=clf, random_seed=random_seed)
roc_score = ablation_results[0]["roc_auc"]
penalty_score = ref_bin_scores["roc_auc"] - roc_score
r2_score = ablation_results[1]["r2_train"]
penalty_r2 = ref_reg_scores["r2_train"] - r2_score
col_name = col if type(col) == str else col[0][:(col[0].find("x") - 1)]
by_col_ablation.append({ "col": col_name, "roc_score": roc_score, "penalty_score": penalty_score, "r2_score": r2_score, "penalty_r2": penalty_r2})
print(" done") # for new line
result_df =
|
pd.DataFrame(by_col_ablation)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os
from typing import IO
import pandas as pd
from PySDDP.dessem.script.templates.restseg import RestsegTemplate
COMENTARIO = '&'
CABECALHO = 'X'
class Restseg(RestsegTemplate):
"""
Class containing every element common to any version of the Dessem Restseg file.
Its purpose is to provide duck typing for the Dessem class and to add a level of specialization
inside the factory. It also passes on the responsibility for implementing the read and write
methods.
"""
def __init__(self):
super().__init__()
self.tabseg_indice = dict()
self.tabseg_tabela = dict()
self.tabseg_limite = dict()
self.tabseg_celula = dict()
self.tabseg_indice_df = pd.DataFrame()
self.tabseg_tabela_df = pd.DataFrame()
self.tabseg_limite_df = pd.DataFrame()
self.tabseg_celula_df = pd.DataFrame()
self.restseg = None
self._comentarios_ = None
def ler(self, file_name: str) -> None:
"""
Method that reads the file with the security constraints represented by tables.
User Manual III.2: file containing information on the security limits for the electrical grid
provided as tables (RESTSEG.XXX).
:param file_name: string with the full path to the file
:return:
"""
dir_base = os.path.split(file_name)[0]
# Lists for the TABSEG INDICE records
self.tabseg_indice['mneumo'] = list()
self.tabseg_indice['num'] = list()
self.tabseg_indice['descricao'] = list()
# Lists for the TABSEG TABELA records
self.tabseg_tabela['mneumo'] = list()
self.tabseg_tabela['num1'] = list()
self.tabseg_tabela['tipo1'] = list()
self.tabseg_tabela['tipo2'] = list()
self.tabseg_tabela['num2'] = list()
self.tabseg_tabela['carg'] = list()
# Lists for the TABSEG LIMITE records
self.tabseg_limite['mneumo'] = list()
self.tabseg_limite['num'] = list()
self.tabseg_limite['var_parm_1'] = list()
self.tabseg_limite['var_parm_2'] = list()
self.tabseg_limite['var_parm_3'] = list()
# Lists for the TABSEG CELULA records
self.tabseg_celula['mneumo'] = list()
self.tabseg_celula['num'] = list()
self.tabseg_celula['limite'] = list()
self.tabseg_celula['f'] = list()
self.tabseg_celula['par_1_inf'] = list()
self.tabseg_celula['par_1_sup'] = list()
self.tabseg_celula['par_2_inf'] = list()
self.tabseg_celula['par_2_sup'] = list()
self.tabseg_celula['par_3_inf'] = list()
self.tabseg_celula['par_3_sup'] = list()
self.restseg = list()
self._comentarios_ = list()
# noinspection PyBroadException
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
# Follow the user manual
continua = True
while continua:
self.next_line(f)
linha = self.linha.strip()
# If the line is a comment, just record it and skip to the next line
if linha[0] == COMENTARIO:
self._comentarios_.append(linha)
self.restseg.append(linha)
continue
if linha[0] == CABECALHO:
self.restseg.append(linha)
continue
mneumo = linha[:13].strip().lower()
self.restseg.append(linha[:13])
# Read the data according to the corresponding mnemonic
if mneumo == 'tabseg indice':
self.tabseg_indice['mneumo'].append(self.linha[:13])
self.tabseg_indice['num'].append(self.linha[14:19])
self.tabseg_indice['descricao'].append(self.linha[20:80])
self.dados['tabseg_indice']['valores'] = self.tabseg_indice
self.tabseg_indice_df =
|
pd.DataFrame(self.tabseg_indice)
|
pandas.DataFrame
|
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import gzip
import io
import os
import tarfile
import tempfile
from unittest.mock import mock_open, patch
import warnings
import pandas as pd
from lineage.resources import Resources
from tests import BaseLineageTestCase
class TestResources(BaseLineageTestCase):
def _reset_resource(self):
self.resource._genetic_map = {}
self.resource._genetic_map_name = ""
self.resource._cytoBand_hg19 = pd.DataFrame()
self.resource._knownGene_hg19 =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Data getters
import datetime
import gzip
import json
import logging
import os
import numpy as np
import pandas as pd
from ai_papers_with_code import PROJECT_DIR
DATA_PATH = f"{PROJECT_DIR}/inputs/data"
# Papers with code scripts
def make_year(x):
"""Extracts year from a datetime.datetime object"""
return x.year if pd.isnull(x) is False else np.nan
def read_parse(file_name):
"""Reads, decompresses and parses a pwc file"""
with gzip.open(f"{DATA_PATH}/{file_name}", "rb") as f:
file_content = f.read()
return json.loads(file_content)
def parse_date_string(x, _format="%Y-%m-%d"):
return (
datetime.datetime.strptime(x, _format) if pd.isnull(x) is False else np.nan
)
def make_month_year(x):
return datetime.datetime(x.year, x.month, 1) if pd.isnull(x) is False else np.nan
def make_empty_list_na(df, variables):
"""Remove empty lists with np.nans in a dataframe"""
df_ = df.copy()
for v in variables:
df_[v] = df[v].apply(lambda x: x if len(x) > 0 else np.nan)
return df_
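# The getters below read and parse the gzipped papers-with-code JSON dumps from DATA_PATH;
# get_pwc_papers additionally normalises missing values and derives date, month_year and year.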
def get_pwc_papers():
"""Get papers table"""
# Read and parse the data
paper_json = read_parse("papers-with-abstracts.json.gz")
# Fix missing values
paper_df = pd.DataFrame(paper_json)
paper_df_clean = make_empty_list_na(paper_df, ["tasks", "methods"]).replace(
{None: np.nan, "": np.nan}
)
paper_df_clean["date"] = paper_df_clean["date"].apply(
lambda x: parse_date_string(x)
)
paper_df_clean["month_year"] = paper_df_clean["date"].apply(
lambda x: make_month_year(x)
)
paper_df_clean["year"] = paper_df_clean["date"].apply(lambda x: make_year(x))
return paper_df_clean
def get_pwc_code_lookup():
"""Get papers to code lookup"""
paper_code_table = read_parse("links-between-papers-and-code.json.gz")
pc_df = pd.DataFrame(paper_code_table).replace({None: np.nan})
return pc_df
def get_pwc_methods():
"""Get methods"""
method_json = read_parse("methods.json.gz")
method_df =
|
pd.DataFrame(method_json)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# Radproc - A GIS-compatible Python-Package for automated RADOLAN Composite Processing and Analysis.
# Copyright (c) 2018, <NAME>.
# DOI: https://doi.org/10.5281/zenodo.1313701
#
# Distributed under the MIT License (see LICENSE.txt for more information), complemented with the following provision:
# For the scientific transparency and verification of results obtained and communicated to the public after
# using a modified version of the work, You (as the recipient of the source code and author of this modified version,
# used to produce the published results in scientific communications) commit to make this modified source code available
# in a repository that is easily and freely accessible for a duration of five years after the communication of the obtained results.
"""
=====================
Raw Data Processing
=====================
Functions for raw data processing.
Unzip, import, clip and convert RADOLAN raw data and write DataFrames to HDF5.
.. autosummary::
:nosignatures:
:toctree: generated/
unzip_RW_binaries
unzip_YW_binaries
radolan_binaries_to_dataframe
radolan_binaries_to_hdf5
create_idraster_and_process_radolan_data
process_radolan_data
.. module:: radproc.raw
:platform: Windows
:synopsis: Python package radproc (Radar data processing), Module raw
.. moduleauthor:: <NAME>
"""
import numpy as np
import pandas as pd
import os, sys
import tarfile as _tarfile
import gzip as _gzip
import shutil as _shutil
from datetime import datetime
#from radproc.wradlib_io import read_RADOLAN_composite
#from radproc.sampledata import get_projection_file_path
import radproc.wradlib_io as _wrl_io
import radproc.sampledata as _sampledata
import warnings, tables
def unzip_RW_binaries(zipFolder, outFolder):
"""
Unzips RADOLAN RW binary data saved in monthly .tar or tar.gz archives (e.g. RWrea_200101.tar.gz, RWrea_200102.tar.gz).
If necessary, extracted binary files are zipped to .gz archives to save memory space on disk.
Creates directory tree of style
*<outFolder>/<year>/<month>/<binaries with hourly data as .gz files>*
:Parameters:
------------
zipFolder : string
Path of directory containing RW data as monthly tar / tar.gz archives to be unzipped.
Archive names must contain year and month at end of basename: RWrea_200101.tar or RWrea_200101.tar.gz
outFolder : string
Path of output directory. Will be created if it doesn't exist, yet.
:Returns:
---------
No return value
"""
if not os.path.exists(outFolder):
os.mkdir(outFolder)
# create list of all tar files and identify years
tarFileList = os.listdir(zipFolder)
years = np.unique([f[-10:-6] if f.endswith(".tar") else f[-13:-9] for f in tarFileList])
for year in years:
# only select files of current year
tarFilesYear = [f for f in tarFileList if year in f]
# create new folder for current year
yearFolder = os.path.join(outFolder, year)
os.mkdir(yearFolder)
for monthTarFile in tarFilesYear:
# create month folder for every month archive
if monthTarFile.endswith('.tar.gz'):
month = str(int(monthTarFile[-9:-7]))
elif monthTarFile.endswith('.tar'):
month = str(int(monthTarFile[-6:-4]))
monthFolder = os.path.join(yearFolder, month)
os.mkdir(monthFolder)
# open tar archive and extract all files to month folder
with _tarfile.open(name = os.path.join(zipFolder,monthTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
binaryList = os.listdir(monthFolder)
# if extracted files are already .gz archives: skip, else: zip binary files to .gz archives and delete unzipped files
if not binaryList[0].endswith(".gz"):
for binaryName in binaryList:
binaryFile = os.path.join(monthFolder, binaryName)
with open(binaryFile, 'rb') as f_in, _gzip.open(os.path.join(monthFolder, binaryName + ".gz"), 'wb') as f_out:
_shutil.copyfileobj(f_in, f_out)
os.remove(binaryFile)
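# Example usage (hypothetical paths, for illustration only):
#     unzip_RW_binaries(zipFolder=r"C:\Data\RW_tar", outFolder=r"C:\Data\RW")
# which creates <outFolder>\<year>\<month>\ folders holding the hourly RW binaries as .gz files.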
def unzip_YW_binaries(zipFolder, outFolder):
"""
Unzips RADOLAN YW binary data.
Data have to be saved in monthly .tar or tar.gz archives (e.g. YWrea_200101.tar.gz, YWrea_200102.tar.gz),
which contain daily archives with binary files.
If necessary, extracted binary files are zipped to .gz archives to save memory space on disk.
Creates directory tree of style
*<outFolder>/<year>/<month>/<binaries with data in temporal resolution of 5 minutes as .gz files>*
:Parameters:
------------
zipFolder : string
Path of directory containing YW data as monthly tar / tar.gz archives to be unzipped.
Archive names must contain year and month at end of basename: YWrea_200101.tar or YWrea_200101.tar.gz
outFolder : string
Path of output directory. Will be created if it doesn't exist, yet.
:Returns:
---------
No return value
"""
if not os.path.exists(outFolder):
os.mkdir(outFolder)
# create list of all tar files
tarFileList = os.listdir(zipFolder)
years = np.unique([f[-10:-6] if f.endswith(".tar") else f[-13:-9] for f in tarFileList])
for year in years:
# only select files of current year
tarFilesYear = [f for f in tarFileList if year in f]
# create new folder for current year
yearFolder = os.path.join(outFolder, year)
os.mkdir(yearFolder)
# for every month...
for monthTarFile in tarFilesYear:
# create month folder for every month archive
if monthTarFile.endswith('.tar.gz'):
month = str(int(monthTarFile[-9:-7]))
elif monthTarFile.endswith('.tar'):
month = str(int(monthTarFile[-6:-4]))
monthFolder = os.path.join(yearFolder, month)
os.mkdir(monthFolder)
# open tar archive and extract all daily gz archives to month folder
with _tarfile.open(name = os.path.join(zipFolder,monthTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
# for every day...
dayTarFileList = os.listdir(monthFolder)
for dayTarFile in dayTarFileList:
with _tarfile.open(name = os.path.join(monthFolder, dayTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
os.remove(os.path.join(monthFolder, dayTarFile))
binaryList = os.listdir(monthFolder)
# if extracted files are already .gz archives: skip, else: zip binary files to .gz archives and delete unzipped files
if not binaryList[0].endswith(".gz"):
for binaryName in binaryList:
binaryFile = os.path.join(monthFolder, binaryName)
with open(binaryFile, 'rb') as f_in, _gzip.open(os.path.join(monthFolder, binaryName + ".gz"), 'wb') as f_out:
_shutil.copyfileobj(f_in, f_out)
os.remove(binaryFile)
def radolan_binaries_to_dataframe(inFolder, idArr=None):
"""
Import all RADOLAN binary files in a directory into a pandas DataFrame,
optionally clipping the data to the extent of an investigation area specified by an ID array.
:Parameters:
------------
inFolder : string
Path to the directory containing RADOLAN binary files.
All files ending with '-bin' or '-bin.gz' are read in.
The input folder path does not need to have any particular directory structure.
idArr : one-dimensional numpy array (optional, default: None)
containing ID values to select RADOLAN data of the cells located in the investigation area.
If no idArr is specified, the ID array is automatically generated from RADOLAN metadata
and RADOLAN precipitation data are not clipped to any investigation area.
:Returns:
---------
(df, metadata) : tuple with two elements:
df : pandas DataFrame containing...
- RADOLAN data of the cells located in the investigation area
- datetime row index with defined frequency depending on the RADOLAN product and time zone UTC
- ID values as column names
metadata : dictionary
containing metadata from the last imported RADOLAN binary file
    Binary files that cannot be read due to processing errors are skipped and the
    corresponding intervals are filled with NoData (NaN) values.
    For reference, a text file listing the skipped file names and their error messages is
    written for the respective monthly input data folder.
    Errors caused by obviously corrupted file formats are known, for example, for the
    RADOLAN RW dataset in July and August 2005 and in May 2007.
:Format description and examples:
---------------------------------
Every row of the output DataFrame equals a precipitation raster of the investigation area at the specific date.
Every column equals a time series of the precipitation at a specific raster cell.
Data can be accessed and sliced with the following Syntax:
**df.loc[row_index, column_name]**
with row index as string in date format 'YYYY-MM-dd hh:mm' and column names as integer values
**Examples:**
>>> df.loc['2008-05-01 00:50',414773] #--> returns single float value of specified date and cell
>>> df.loc['2008-05-01 00:50', :] #--> returns entire row (= raster) of specified date as one-dimensional DataFrame
>>> df.loc['2008-05-01', :] #--> returns DataFrame with all rows of specified day (because time of day is omitted)
    >>> df.loc[:, 414773] #--> returns the time series of the specified cell as a Series
"""
try:
# List all files in directory
files = os.listdir(inFolder)
except:
print("Directory %s can not be found. Please check your input parameter!" % inFolder)
sys.exit()
ind = []
    # Check file endings. Only keep files ending in -bin or -bin.gz, the usual formats of RADOLAN binary files
files = [f for f in files if f.endswith('-bin') or f.endswith('-bin.gz')]
# Load first binary file to access header information
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[0]))
del data
except:
# if file could not be read, try next file until metadata of one file could be accessed
got_metadata = False
i=0
        while not got_metadata:
            print("Cannot open %s to access metadata. Trying next file." % files[i])
i+=1
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[i]))
del data
got_metadata = True
except:
got_metadata = False
# interrupt after first 100 files to avoid infinite loops
if i == 100:
print('Could not read the first 100 files in. Exit script. Please check your input files and parameters.')
raise
# different RADOLAN products have different grid sizes (e.g. 900*900 for the RADOLAN national grid,
# 1100*900 for the extended national grid used for RADKLIM)
gridSize = metadata['nrow'] * metadata['ncol']
# if no ID array is specified, generate it from metadata
if idArr is None:
idArr = np.arange(0, gridSize)
    # Create a two-dimensional array of dtype float32 filled with zeros: one row per file in inFolder, one column per ID in idArr.
dataArr = np.zeros((len(files), len(idArr)), dtype = np.float32)
skipped_files = []
error_messages = []
# For each file in directory...
for i in range(0, len(files)):
# Read data and header of RADOLAN binary file
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[i]))
# append datetime object to index list. Pandas automatically interprets this list as timeseries.
ind.append(metadata['datetime'])
# binary data block starts in the lower left corner but ESRI Grids are created starting in the upper left corner by default
            # [::-1] --> reverse row order of the 2D array so the first row is located in the geographic north
# reshape(gridSize,) --> convert to one-dimensional array
data = data[::-1].reshape(gridSize,)
# Replace NoData values with NaN
data[data == metadata['nodataflag']] = np.nan
# Clip data to investigation area by selecting all values with a corresponding ID in idArr
# and insert data as row in the two-dimensional data array.
dataArr[i,:] = data[idArr]
except Exception as e:
skipped_files.append(files[i])
error_messages.append(str(e))
# extract datetime from filename instead of metadata
date_str = files[i].split("-")[2]
datetime_obj = datetime.strptime(date_str, '%y%m%d%H%M')
            # some early RADOLAN intervals start at HH:45, although the file name states HH:50
            if ind and ind[0].minute == 45:
                datetime_obj = datetime_obj.replace(minute=45)
            # append the extracted date to the index and insert NaN into all cells of the skipped interval
            ind.append(datetime_obj)
            dataArr[i, :] = np.nan
# Convert 2D data array to DataFrame, set timeseries index and column names and localize to time zone UTC
df = pd.DataFrame(dataArr, index = ind, columns = idArr)
df.columns.name = 'Cell-ID'
df.index.name = 'Date (UTC)'
df.index = df.index.tz_localize('UTC')
#df = df.tz_localize('UTC')
metadata['timezone'] = 'UTC'
metadata['idArr'] = idArr
# check for RADOLAN product type and set frequency of DataFrame index
# lists can be extended for other products...
if metadata['producttype'] in ["RW"]:
try:
            # set index.freq directly to avoid the DataFrame copy made by .asfreq(); not supported by all pandas versions, hence the try/except
df.index.freq = pd.tseries.offsets.Hour()
except:
df = df.asfreq('H')
elif metadata['producttype'] in ["RY", "RZ", "YW"]:
try:
df.index.freq = 5 *
|
pd.tseries.offsets.Minute()
|
pandas.tseries.offsets.Minute
|
"""
Copyright (c) 2018-2021 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
* The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details
provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
* Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
* This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import json
import time
import string
import requests
import pandas as pd
from math import trunc
from time import strftime
from random import choice
from celery import shared_task
from django.core.mail import send_mail
from device_notification_subsystem.settings import conf
from .models import Sms, SmsContent, Email, EmailContent, UniqueEmail, UniqueMsisdn
@shared_task()
def celery_email(subject, content, sender, recipient):
recipients = [recipient]
send_mail(subject, content, sender, recipients, fail_silently=False)
return "Email has been sent"
@shared_task()
def jasmine_sms(msisdn, content, sender_no, operator):
url = conf['jasmine_single_sms_url']
sms_body = {
"to": msisdn,
"from": sender_no,
# "coding": 0,
"content": content
}
auth = jasmin_auth(operator)
headers = {'content-type': 'application/json', 'Authorization': auth}
response = requests.post(url=url, data=json.dumps(sms_body), headers=headers)
    if response and response.status_code == 200:
        return "SMS sent to {m} : {c}".format(m=msisdn, c=content)
    return "SMS delivery to {m} failed".format(m=msisdn)
@shared_task
def bulk_email_db(subject, content, subsystem, sender_email):
t1 = time.time()
qry = UniqueEmail.objects.order_by('email').values_list('email', flat=True).distinct()
campaign_id = "Email_DB_" + strftime("%Y-%m-%d_%H-%M-%S")
for q in qry:
print(f"sending email to: {q}")
celery_email.apply_async(args=[subject, content, sender_email, q], queue='email_que') # calling celery task
write_email_db(q, sender_email, subsystem, subject, content, campaign_id)
t2 = time.time()
return "Email campaign completed in: {time} secs".format(time=(t2 - t1))
@shared_task
def bulk_sms_db(content, operator, subsystem, sender_no, sms_rate):
t1 = time.time()
qry = UniqueMsisdn.objects.order_by('msisdn').values_list('msisdn', flat=True).distinct()
start = 0
total_msisdns = len(qry)
sms_batch_size = sms_rate
total_chunks = (total_msisdns / sms_batch_size)
    if total_chunks.is_integer():
        total_chunks = trunc(total_chunks)
    else:
        total_chunks = trunc(total_chunks) + 1
print("total chunks = ", total_chunks)
end = sms_batch_size
for chunk in range(0, total_chunks):
if chunk != total_chunks - 1:
msisdn_list = qry[start:end]
start = end
end = end + sms_batch_size
else:
msisdn_list = qry[start:]
print("processing chunk-", chunk + 1)
# res = send_sms_batch(msisdn_list, content)
send_sms_batch(msisdn_list, operator, content)
print("DB insertion started ")
campaign_id = "SMS_DB_" + strftime("%Y-%m-%d_%H-%M-%S")
# result = [write_sms_db(q, sender_no, operator, subsystem, content, campaign_id) for q in qry]
[write_sms_db(q, sender_no, operator, subsystem, content, campaign_id) for q in qry]
t2 = time.time()
return "SMS campaign completed in: {time} sec".format(time=(t2 - t1))
# noinspection PyUnboundLocalVariable,PyUnusedLocal
@shared_task
def bulk_email_file(file, subject, content, subsystem, sender_email):
"""Function to create celery task of processing bulk file for sending Email campaigns """
t1 = time.time()
# extracting file path & file name
file_path, file_name = os.path.split(file)
try:
# read the file into DataFrame
df_csv = pd.read_csv(file, usecols=range(2), dtype={"imei": str, "imsi": str, "email": str},
chunksize=conf['df_big_chunksize'])
except Exception as e:
if e:
error = {"Error": "File content is not Correct"}
return json.dumps(error)
df = pd.concat(df_csv)
# removing white spaces from Column 'email'
df['email'] = df['email'].str.strip()
# removing Email-IDs with wrong format
df = df[(df.email.astype(str).str.match(conf['validation_regex']['email']))]
rows, cols = df.shape
print(rows, cols)
start = 0
# generating random string for file-name
all_char = string.ascii_letters + string.digits
rand_str = "".join(choice(all_char) for x in range(8))
if rows >= 10:
chunk_size = trunc(rows / 10)
end = chunk_size
email_files, all_files = [], []
for i in range(1, 11):
print(start, end)
f_all = "Email_" + rand_str + "_chunk_" + str(i) + ".csv"
file_all = os.path.join(file_path, f_all)
print(file_all)
all_files.append(file_all)
if i != 10:
df[start:end].to_csv(file_all, index=False)
start = end
end = end + chunk_size
else:
df[start:].to_csv(file_all, index=False)
else:
return "File must contain more than 10 Email-IDs"
for i in range(1, 11):
print("Processing File-", i)
all_file = all_files[i - 1]
que = 'que' + str(i)
process_email_file.apply_async(args=[all_file, subject, content, subsystem, file_name, sender_email],
queue=que)
t2 = time.time()
return "File chunking completed in: {time} sec".format(time=(t2 - t1))
# noinspection PyUnboundLocalVariable,PyUnusedLocal
@shared_task
def bulk_sms_file(file, content, operator, subsystem, sender_no, sms_rate):
"""Function to create celery task of processing bulk file for sending SMS campaign """
t1 = time.time()
# extracting file path & file name
file_path, file_name = os.path.split(file)
try:
# read the file into DataFrame
df_csv = pd.read_csv(file, usecols=range(5), dtype={"imei": str, "imsi": str, "msisdn": str, "block_date": str,
"reasons": str}, chunksize=conf['df_big_chunksize'])
except Exception as e:
if e:
error = {"Error": "File content is not Correct"}
return json.dumps(error)
df = pd.concat(df_csv)
# removing white spaces from Column 'msisdn'
df['msisdn'] = df['msisdn'].str.strip()
# removing MSISDN with wrong format
df = df[(df.msisdn.astype(str).str.match(conf['validation_regex']['msisdn']))]
# Copying "MSISDN" column to new DataFrame
df_new = pd.DataFrame()
df_new['msisdn'] = df['msisdn']
rows, cols = df_new.shape
print(rows, cols)
start = 0
# generating random string for file-name
all_char = string.ascii_letters + string.digits
rand_str = "".join(choice(all_char) for x in range(8))
if rows >= 10:
chunk_size = trunc(rows / 10)
end = chunk_size
msisdn_files, all_files = [], []
for i in range(1, 11):
print(start, end)
f_msisdn = "MSISDN_only_" + rand_str + "_chunk_" + str(i) + ".csv"
f_all = "File_all_" + rand_str + "_chunk_" + str(i) + ".csv"
file_msisdn = os.path.join(file_path, f_msisdn)
file_all = os.path.join(file_path, f_all)
print(file_msisdn)
msisdn_files.append(file_msisdn)
all_files.append(file_all)
if i != 10:
df_new[start:end].to_csv(file_msisdn, index=False)
df[start:end].to_csv(file_all, index=False)
start = end
end = end + chunk_size
else:
df_new[start:].to_csv(file_msisdn, index=False)
df[start:].to_csv(file_all, index=False)
else:
return "File must contain more than 10 MSISDNs"
for i in range(1, 11):
print("Processing File-", i)
msisdn_file = msisdn_files[i-1]
all_file = all_files[i-1]
que = 'que' + str(i)
process_sms_file.apply_async(args=[msisdn_file, all_file, content, operator, subsystem, file_name,
sender_no, sms_rate], queue=que)
t2 = time.time()
return "File chunking completed in: {time} sec".format(time=(t2 - t1))
@shared_task
def process_email_file(file_all, subject, content, subsystem, file_name, sender_email):
t1 = time.time()
df_t2 = pd.read_csv(file_all, chunksize=conf['df_small_chunksize'])
df_all = pd.concat(df_t2)
campaign_id = "Email_File_" + strftime("%Y-%m-%d_%H-%M-%S")
for row in df_all.itertuples(index=False):
print(f"Sending Email to {row[1]}")
celery_email(subject, content, sender_email, row[1])
write_email_db(row[1], sender_email, subsystem, subject, content, campaign_id, file_name, row[0])
t2 = time.time()
return "File processing is completed in {time} secs".format(time=(t2 - t1))
@shared_task
def process_sms_file(file_msisdn, file_all, content, operator, subsystem, file_name, sender_no, sms_rate):
start = 0
t1 = time.time()
df_t =
|
pd.read_csv(file_msisdn, chunksize=conf['df_small_chunksize'])
|
pandas.read_csv
|
from toolz import interleave
import os
import pandas as pd
import json
import shutil
import numpy as np
import copy
from scipy.spatial.transform import Rotation as R
main_path = os.getcwd()
file_path = main_path + '/framework/pre-processing/interleaved-dataframe/_output/S08-A09-Orientationjoints-1.csv'
out_path = main_path + '/framework/pre-processing/interleaved-dataframe/test/turn_subject_output.csv'
def unitary_rotation_quaternion(x:float, y:float, z:float, a:float):
rotation_factor = np.sin( a / 2.0 )
x = x * rotation_factor
y = y * rotation_factor
z = z * rotation_factor
w = np.cos( a / 2 )
return [w,x,y,z]
def quaternion_multiply(quaternion1, quaternion0):
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float32)
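# Hedged sanity check (not in the original script): a 180-degree rotation about z gives the
# unit quaternion [w, x, y, z] ~ [0, 0, 0, 1], and composing it with itself is (up to sign,
# because quaternions double-cover rotations) the identity rotation.
def _example_quaternion_checks():
    q = unitary_rotation_quaternion(0, 0, 1, np.pi)
    assert np.allclose(q, [np.cos(np.pi / 2), 0, 0, np.sin(np.pi / 2)])
    identity = quaternion_multiply(q, q)
    assert np.allclose(np.abs(identity), [1, 0, 0, 0], atol=1e-6)
    return q, identity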
def create_rotated_images(grades, original_df):
vector_de_rotacion = unitary_rotation_quaternion(0,0,1, grades*np.pi/180)
r = R.from_euler('z', 180, degrees=True)
vector_r = r.as_quat()
    # if we used this one instead, then line 24 would have to unpack as
    # x0, y0, z0, w0
    # but comparing the two shows that the result is the same.
original_df_array = original_df.values
final_df_array = copy.deepcopy(original_df_array)
i = 0
for element in original_df_array:
final_df_array[i] = quaternion_multiply(vector_de_rotacion, element)
i = i +1
return final_df_array
#################
# Main #
#################
df = pd.read_csv(file_path, header=None)
df = df.iloc[1:]
df_first = df.iloc[:,0].astype(np.float32)
df_first = df_first.reset_index(drop=True)
df = df.drop(df.columns[[0]], axis=1)
df = df.astype(np.float32)
rotated_df = create_rotated_images(180, df)
final_df = pd.DataFrame(rotated_df.reshape(15192,4), columns=["w","x","y","z"])
final_df =
|
pd.concat([df_first, final_df], ignore_index=True, axis=1)
|
pandas.concat
|
# coding: utf-8
# In[24]:
# Import necessary libraries
import boto3
import pandas as pd
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
client = boto3.client('dynamodb', region_name='us-east-1')
print('Pandas version', pd.__version__)
print('Numpy version', np.__version__)
print('Seaborn version', sns.__version__)
# In[25]:
# Read datasets from CSVs
df =
|
pd.read_csv('./tmp/ratings.csv')
|
pandas.read_csv
|
from datetime import datetime
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, make_scorer, r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def impute_mode(df, variable):
'''
Usage: replace NaN with the mode in specific column
Input arguments:
df -- a dataframe object
variable -- a column where you want to apply imputation
Return: None
'''
# find most frequent category
most_frequent_category = df.groupby([variable])[variable].count().sort_values(ascending=False).index[0]
# replace NA
df[variable].fillna(most_frequent_category, inplace=True)
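# Hedged toy example (illustrative only): impute_mode() fills NaN with the most frequent
# category of the column, in place.
def _example_impute_mode():
    toy = pd.DataFrame({'room_type': ['Entire home', 'Private room', 'Entire home', np.nan]})
    impute_mode(toy, 'room_type')
    assert toy['room_type'].isna().sum() == 0
    assert toy.loc[3, 'room_type'] == 'Entire home'
    return toy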
def day_diff(df):
'''
Usage: calculate the day difference using columns "host_since" and "last_scraped"
Input arguments:
df -- a dataframe object
Return: None
'''
if ('last_scraped' in df.columns) & ('host_since' in df.columns):
df['host_days'] = (
|
pd.to_datetime(df['last_scraped'])
|
pandas.to_datetime
|
#!/usr/bin/python3
import pandas as pd
import numpy as np
coh= snakemake.wildcards.cohort
def pheno_harvest():
mfr= pd.read_csv(snakemake.input[0], sep= '\t', header= 0)
trio= pd.read_csv(snakemake.input[1], sep= '\t', header= None, names= ['PREG_ID', 'Child', 'Father', 'Mother'])
trio.dropna(subset= ['PREG_ID'], inplace= True)
mfr['PREG_ID']= coh + '_' + mfr.PREG_ID_1724.astype(int).map(str)
# trio['PREG_ID']= trio.PREG_ID.str.replace('.0', '')
d= pd.merge(mfr, trio, on= ['PREG_ID'], how= 'inner')
d= d[(d['FLERFODSEL']==0)]
d= d[(d['DODKAT']<6) | (d['DODKAT']>10)]
d= d[(d['SVLEN_UL_DG']< 308)]
d= d[(d['SVLEN_UL_DG']> 154)]
d.dropna(subset= ['SVLEN_UL_DG'], inplace= True)
d= d.sample(frac=1)
flag= pd.read_csv(snakemake.input[2], sep= '\t', header= 0)
pca_out= [line.strip() for line in open(snakemake.input[3], 'r')]
flag= flag[(flag['genotypesOK']== True) & (flag['phenotypesOK']== True) & (~flag['IID'].isin(pca_out))]
# d= d.loc[(d.Child.isin(flag.IID)) & (d.Mother.isin(flag.IID)) & (d.Father.isin(flag.IID)), :]
d['Child']= np.where(d.Child.isin(flag.IID), d.Child, np.nan)
d['Mother']= np.where(d.Mother.isin(flag.IID), d.Mother, np.nan)
d['Father']= np.where(d.Father.isin(flag.IID), d.Father, np.nan)
d['spont']= np.where((d.FSTART==1) | (((d.KSNITT.isnull()) | (d.KSNITT>1)) & ((d.KSNITT_PLANLAGT.isnull()) | (d.KSNITT_PLANLAGT==1)) & (d.INDUKSJON_PROSTAGLANDIN==0) & (d.INDUKSJON_ANNET==0) & (d.INDUKSJON_OXYTOCIN==0) & (d.INDUKSJON_AMNIOTOMI==0)) | (~d.VANNAVGANG.isnull()) , 1, 0)
d['PROM']= np.where(d.VANNAVGANG.isnull(), 0, 1)
d['PARITY0']= np.where(d.PARITET_5==0, 1, 0)
d['cohort']= coh
d.drop_duplicates(subset= ['Mother'], keep= 'first', inplace= True)
d['OBint']= np.where(d.spont==0, 1, 0)
d['MOR_ALDER']= d.MORS_ALDER
d['FAR_ALDER']= d.FARS_ALDER_KAT_K8
d= d[['Child', 'Mother', 'Father', 'PREG_ID', 'spont', 'OBint', 'PROM', 'SVLEN_UL_DG', 'PARITY0', 'cohort', 'MOR_ALDER', 'FAR_ALDER']]
return d
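# Hedged toy illustration (not part of the pipeline): the np.where(... .isin(...)) pattern used
# above keeps an ID only when it passed QC and blanks it to NaN otherwise.
def _example_qc_mask():
    d = pd.DataFrame({'Child': ['id1', 'id2', 'id3']})
    passing_ids = pd.Series(['id1', 'id3'])
    d['Child'] = np.where(d.Child.isin(passing_ids), d.Child, np.nan)
    return d  # 'id2' is replaced by NaN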
def pheno_rotterdam():
mfr= pd.read_csv(snakemake.input[0], sep= '\t', header= 0)
trio= pd.read_csv(snakemake.input[1], sep= '\t', header= None, names= ['PREG_ID', 'Child', 'Father', 'Mother'])
trio.dropna(subset= ['PREG_ID'], inplace= True)
mfr['PREG_ID']= coh + '_' + mfr.PREG_ID_315.astype(int).map(str)
d= pd.merge(mfr, trio, on= ['PREG_ID'], how= 'inner')
d= d[(d['FLERFODSEL']=='Enkeltfødsel')]
d= d[d['DODKAT'].str.contains('Levendefødt')]
d= d[(d['SVLEN_UL_DG']< 308)]
d= d[(d['SVLEN_UL_DG']> 154)]
d.dropna(subset= ['SVLEN_UL_DG'], inplace= True)
flag= pd.read_csv(snakemake.input[2], sep= '\t', header= 0)
pca_out= [line.strip() for line in open(snakemake.input[3], 'r')]
flag= flag[(flag['genotypesOK']== True) & (flag['phenoOK']== True) & (~flag['IID'].isin(pca_out))]
d['Child']= np.where(d.Child.isin(flag.IID), d.Child, np.nan)
d['Mother']= np.where(d.Mother.isin(flag.IID), d.Mother, np.nan)
d['Father']= np.where(d.Father.isin(flag.IID), d.Father, np.nan)
d['spont']= np.where(((d.FSTART=='Spontan') | (d.FSTART== '')) | (((d.KSNITT=='') | (d.KSNITT== 'Uspesifisert') | (d.KSNITT== 'Akutt keisersnitt')) & (d.INDUKSJON_PROSTAGLANDIN=='Nei') & (d.INDUKSJON_ANNET=='Nei') & (d.INDUKSJON_OXYTOCIN=='Nei') & (d.INDUKSJON_AMNIOTOMI=='Nei')) | (~d.VANNAVGANG.isnull()), 1, 0)
d['PROM']= np.where(d.VANNAVGANG.isnull(), 0, 1)
d['PARITY0']= np.where(d.PARITET_5=='0 (førstegangsfødende)', 1, 0)
d['cohort']= coh
d.drop_duplicates(subset= ['Mother'], keep= 'first', inplace= True)
d['OBint']= np.where(d.spont==0, 1, 0)
d['MOR_ALDER']= d.FAAR - d.MOR_FAAR
d['FAR_ALDER']=
|
pd.Categorical(d.FARS_ALDER_KAT_K8)
|
pandas.Categorical
|
# -*- coding: utf-8 -*-
"""
@author: LeeZChuan
"""
import pandas as pd
import numpy as np
import requests
import os
from pandas.core.frame import DataFrame
import json
import datetime
import time
pd.set_option('display.max_columns',1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth',1000)
def addressProcess(address):
result = address
if '镇' in address:
item = address.split('镇')
result = item[0]+'镇'
elif '农场' in address:
item = address.split('农场')
result = item[0]+'农场'
elif '街道' in address:
item = address.split('街道')
result = item[0]+'街道'
elif '路' in address:
item = address.split('路')
result = item[0]+'路'
elif '大道' in address:
item = address.split('大道')
result = item[0]+'大道'
elif '街' in address:
item = address.split('街')
result = item[0]+'街'
elif '村' in address:
item = address.split('村')
result = item[0]+'村'
return result
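# Hedged toy check (illustrative only): addressProcess() truncates an address right after the
# first matching keyword (town, farm, street, road, avenue, ...).
def _example_address_process():
    assert addressProcess('秀英镇中心路12号') == '秀英镇'
    assert addressProcess('海秀路88号') == '海秀路'
    assert addressProcess('滨海大道100号') == '滨海大道'
    return addressProcess('滨海大道100号')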
def processJson(filePath):
    orderNum = 0  # number of orders
    with open(filePath, 'r', encoding="utf-8") as f:
        # read all lines; each line is one string
i = 0
for jsonstr in f.readlines():
list_address = []
list_name = []
jsonstr = jsonstr[1:-1]
# listValue = jsonstr.split(']];,')
listValue = jsonstr.split(']],')
for listitem in listValue:
listitem = listitem[1:]
listCon = listitem.split(',[')
listAddr = listCon[3][:-1].split(',')
if len(listAddr) == 2 and '海南省海口市' in listAddr[0] and '海南省海口市' in listAddr[1]:
list_address_each = []
startAdd = addressProcess(listAddr[0][6:])
endAdd = addressProcess(listAddr[1][6:])
if startAdd != endAdd:
list_address_each.append(startAdd)
list_address_each.append(endAdd)
list_address.append(list_address_each)
list_name.append(startAdd)
list_name.append(endAdd)
pd_list_address = pd.DataFrame(list_name)
# print (pd_list_address)
name_list_count = pd.value_counts(pd_list_address[0], sort=False)
name_df = pd_list_address[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list if name_list_count[name] > 300]
name_list_new = []
for item in name_list_all:
name_list_new.append(item[0])
print (name_list_new)
new_list_address = []
for item in list_address:
if item[0] in name_list_new and item[1] in name_list_new:
new_list = []
new_list.append(item[0])
new_list.append(item[1])
new_list_address.append(new_list)
orderNum += 1
return orderNum, list_address
def save(filename, contents):
fh = open(filename, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def dataSta(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True) #删除含有空值的行
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
name_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
name_result.append(name[0])
name_result.append(name[1])
name_df = DataFrame(name_result)
name_list_count = pd.value_counts(name_df[0], sort=False)
name_df = name_df[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list]
print (name_list_all)
strValue = "{\"nodes\": [\n"
for item in name_list_all:
strValue = strValue+" {\"name\":\""+item[0] +"\",\n \"value\":"+str(item[1])+" \n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ],\n"
strValue = strValue + "\"links\": [\n"
for item in all_result:
strValue = strValue+" {\"source\":\""+item[0]+"\", \"target\":\""+item[1]+"\", \"value\":"+str(item[2])+"\n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]\n}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_nodes_links.json'
save(name_path, strValue)
def hexiantu(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True) #删除含有空值的行
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
strValue = ''
strValue = strValue + "{\"value\": [\n"
for item in all_result:
strValue = strValue+" [\""+item[0]+"\", \""+item[1]+"\", "+str(item[2])+"],\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_hexiantu.json'
save(name_path, strValue)
def read_csv(filepath):
# raw_train_df = pd.read_csv(fileInfo, sep='\s+', engine='python').loc[:,[name_title+'arrive_time',name_title+'starting_lng',name_title+'starting_lat',name_title+'dest_lng',name_title+'dest_lat']]
raw_train_df = pd.read_csv(filepath, sep=',', engine='python').loc[:,['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'year', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']]
return raw_train_df
def orderNumByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
result = ''
result_distance = '[\n'
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
result = result+str(group_name)+','+str(group_data.shape[0])+'\n'
result_distance = result_distance +' [\n \"'+str(group_name)+'\",\n '+str(group_data.shape[0])+',\n '+str(int(group_data['passenger_count'].mean())/1000)+'\n ],\n'
result_order = result_distance[:-2] + '\n]'
name_path = os.getcwd()+'\lineChart\\'+txtname+'_lineChart.json'
save(name_path, result_order)
def save2(filepath, filename, contents):
if not os.path.exists(filepath):
os.mkdir(filepath)
path = filepath + '\\' + filename
fh = open(path, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def averagenum(num):
nsum = 0
for i in range(len(num)):
nsum += num[i]
return nsum / len(num)
def grade_mode(list):
    '''
    Compute the mode(s) of a list.
    Parameters:
        list: list of values to analyse
    Returns:
        grade_mode: list containing one or more modes of the input data
    '''
    list_set = set(list)  # deduplicate the input values
    frequency_dict = {}
    for i in list_set:  # count how often every distinct value occurs
        frequency_dict[i] = list.count(i)
    grade_mode = []
    for key, value in frequency_dict.items():  # keep every value that reaches the maximum frequency
        if value == max(frequency_dict.values()):
            grade_mode.append(key)
    return grade_mode
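# Hedged toy check (illustrative only): grade_mode() returns every value that reaches the
# maximum frequency, so a bimodal input yields two modes.
def _example_grade_mode():
    modes = grade_mode([1, 2, 2, 3, 3])
    assert sorted(modes) == [2, 3]
    return modes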
def thermodynamicByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
list_count_start = []
list_count_end = []
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
        print('processing time-of-day bucket:', group_name)
result = '[\n'
groupByLocation = group_data.groupby([group_data['starting_lng'],group_data['starting_lat']])
for group_name2, group_data2 in groupByLocation:
list_count_start.append(len(group_data2))
if group_name2[0] > 100 and group_name2[1] < 40:
result = result + ' {\n \"lng\": ' + str(group_name2[0]) + ',\n \"lat\": ' + str(group_name2[1]) + ',\n \"count\": ' + str(len(group_data2)) + '\n },\n'
result = result[:-2] + '\n]'
result2 = '[\n'
groupByLocation2 = group_data.groupby([group_data['dest_lng'],group_data['dest_lat']])
for group_name3, group_data3 in groupByLocation2:
list_count_end.append(len(group_data3))
if group_name3[0] > 100 and group_name3[1] < 40:
result2 = result2 + ' {\n \"lng\": ' + str(group_name3[0]) + ',\n \"lat\": ' + str(group_name3[1]) + ',\n \"count\": ' + str(len(group_data3)) + '\n },\n'
result2 = result2[:-2] + '\n]'
txt_start = txtname+'_start'
txt_dest = txtname+'_dest'
path_start = os.getcwd()+'\dataForMulberryFigure\\'+txt_start
path_dest = os.getcwd()+'\dataForMulberryFigure\\'+txt_dest
name = str(group_name)+'.json'
save2(path_start, name, result)
save2(path_dest, name, result2)
def get_week_day(date):
week_day_dict = {
0 : '星期一',
1 : '星期二',
2 : '星期三',
3 : '星期四',
4 : '星期五',
5 : '星期六',
6 : '星期天',
}
day = date.weekday()
return week_day_dict[day]
def strGetAve(str1, str2):
return ((int(str1)+int(str2))/2)
def calendarHeatMap(foldername):
weatherPath = 'weather_05.xlsx'
weather_df = pd.DataFrame(pd.read_excel(weatherPath))
weather_df = weather_df.loc[:,['日期','天气状况','气温','holiday']]
weather_df['最高温度'] = [item[:2] for item in weather_df['气温']]
weather_df['最低温度'] = [item[-3:-1] for item in weather_df['气温']]
weather_df['平均温度'] = [strGetAve(item[:2],item[-3:-1]) for item in weather_df['气温']]
weather_df['周几'] = [get_week_day(st) for st in weather_df['日期']]
filelist=os.listdir('datasets')
dayLists = []
i = 0
for item in filelist:
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
raw_train_df = read_csv(filename)
dayList.append(raw_train_df.shape[0])
dayList.append(weather_df['天气状况'][i])
dayList.append(weather_df['周几'][i])
dayList.append(weather_df['最高温度'][i])
dayList.append(weather_df['最低温度'][i])
dayList.append(weather_df['平均温度'][i])
dayList.append(weather_df['holiday'][i])
i += 1
dayLists.append(dayList)
result = '[\n'
for item in dayLists:
print ('dealing--------:' + str(item[0]))
if str(item[7]) == '0':
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + '\"\n ],\n'
else:
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + str(item[7]) + '\"\n ],\n'
file = open('calendarHeatMap.json','w', encoding="utf-8")
file.write(result[:-2]+'\n]')
file.close()
def readTxt(filename):
pos = []
with open(filename, 'r', encoding='utf-8') as file_to_read:
while True:
            lines = file_to_read.readline()  # read one whole line as a string
            if not lines:
                break
            # split the line on commas (use split() without an argument for whitespace-separated data)
            p_tmp = [i for i in lines.split(',')]
            pos.append(p_tmp)  # append the newly read record
return pos
def RealtimeStatistics(foldername):
filelist=os.listdir('datasets')
realtimeStati = []
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)
pos = pos.drop([1], axis=1)
pos.columns = ['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']
pos['passenger_count'] = [float(item)/1000 for item in pos['passenger_count']]
pos['normal_time'] = ['0' if str(item) == '' else item for item in pos['normal_time']]
pos['changtu'] = [1 if item > 30 or item == 30 else 0 for item in pos['passenger_count']]
result1 = np.round(pos['changtu'].sum()/(pos['passenger_count'].shape[0])*100,3)
pos['kuaiche'] = [1 if str(item) == '3.0' else 0 for item in pos['product_1level']]
result2 = np.round(pos['kuaiche'].sum()/(pos['kuaiche'].shape[0])*100,3)
pos['gaojia'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['pre_total_fee']]
result3 = np.round(pos['gaojia'].sum()/(pos['pre_total_fee'].shape[0])*100,3)
pos['changshi'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['normal_time']]
result4 = np.round(pos['changshi'].sum()/(pos['normal_time'].shape[0])*100,3)
print (item[:-4], str(result1)+'%', str(result2)+'%', str(result3)+'%', str(result4)+'%')
dayList.append(str(result1)+'%')
dayList.append(str(result2)+'%')
dayList.append(str(result3)+'%')
dayList.append(str(result4)+'%')
realtimeStati.append(dayList)
file = open('RealtimeStatistics.json','w', encoding="utf-8")
file.write(str(realtimeStati))
file.close()
def normalization2(data):
_range = np.max(abs(data))
return np.round(data / _range, 4)
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def standardization(data):
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
return (data - mu) / sigma
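# Hedged toy check (illustrative only): the three scalers above applied to the same small array.
def _example_scalers():
    data = np.array([1.0, 2.0, 4.0])
    assert np.allclose(normalization2(data), [0.25, 0.5, 1.0])      # divide by max(|x|)
    assert np.allclose(normalization(data), [0.0, 1.0 / 3.0, 1.0])  # min-max scaling to [0, 1]
    assert abs(standardization(data).mean()) < 1e-9                 # zero mean after z-scoring
    return standardization(data)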
def Histogrammap(foldername):
filelist=os.listdir('datasets')
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
savefile = item[:-4]
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)
pos = pos.drop([1], axis=1)
pos.columns = ['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']
pos['hour'] = [
|
pd.to_datetime(item)
|
pandas.to_datetime
|
import calendar as cal
from collections import Counter
import copy
import datetime as dt
from dateutil.relativedelta import relativedelta
import math
import operator
import os
import re
import subprocess
import click
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import yaml
from seaice.tools.errors import SeaIceToolsError
from seaice.tools.errors import SeaIceToolsRuntimeError
from seaice.tools.errors import SeaIceToolsValueError
from seaice.tools.plotter.util import save_plot
import seaice.nasateam as nt
import seaice.logging as seaicelogging
import seaice.timeseries as sit
log = seaicelogging.init('seaice.tools')
DEFAULTS = {
'date': dt.date.today(),
'plot_mean': False,
'plot_stdev': False,
'plot_median': False,
'plot_iqr': False,
'plot_idr': False,
'hemi': None,
'month_bounds': (-3, 1),
'nstdevs': 2,
'percentiles': [],
'styles': [],
'year_styles': {},
'years': [],
'data_store': nt.DAILY_DATA_STORE_FILENAME,
'output_dir': None,
'output_file': 'daily_ice_extent_{hemi}.png',
'nday_average': 5,
'divisor': 1e6,
'min_valid': 2,
'legend_side': None
}
PERCENTILE_COLUMN_REGEX = re.compile('^percentile_(?P<percent>.+)$')
YEAR_COLUMN_REGEX = re.compile(r'^\d{4}(?:-\d{4})?$')
def df_daily_extent(hemi, date=DEFAULTS['date'], years=DEFAULTS['years'],
month_bounds=DEFAULTS['month_bounds'], nstdevs=DEFAULTS['nstdevs'],
data_store=DEFAULTS['data_store'], nday_average=DEFAULTS['nday_average'],
percentiles=DEFAULTS['percentiles'], min_valid=DEFAULTS['min_valid'],
divisor=DEFAULTS['divisor']):
"""Returns a pandas DataFrame with all of the data to graph for the given date,
and other years for comparison. The index is an integer index representing
days since the first of the first month, e.g., with a date in March and a
month_bounds of (-3, 1), index 0 contains values from December 1 of the
preceding year.
See the Click documentation on main() for argument information.
"""
# default to 5 months, with the target date falling in the 4th
start_date, end_date = _bounding_date_range(date, *month_bounds)
date_index = pd.date_range(start_date, end_date, freq='D')
# data for mean and stdev aligned to the date_index
extents = sit.daily(hemi, interpolate=1)['total_extent_km2']
df = _climatology_statistics(extents, date_index, nstdevs, percentiles, nday_average, divisor)
df['date'] = date_index
# uniquify and sort list of years
years = np.unique(list(years) + [date.year])
# drop any years after 'date'
years = years[years <= date.year]
# get the data for each year
for year in years:
new_index = _shift_index_to_year(date_index, date, year)
data_year = _df_year(hemi, new_index, data_store, nday_average=nday_average,
min_valid=min_valid, divisor=divisor)
df = df.merge(data_year, left_index=True, right_index=True, how='outer')
# eliminate any data after the given date, to cut off the line
rows = df.date > date
col = _line_name(date_index)
df.loc[rows, col] = np.nan
return df
def figure(df_in, hemi, date=DEFAULTS['date'], plot_mean=DEFAULTS['plot_mean'],
plot_stdev=DEFAULTS['plot_stdev'], styles=DEFAULTS['styles'],
nstdevs=DEFAULTS['nstdevs'], plot_median=DEFAULTS['plot_median'],
plot_iqr=DEFAULTS['plot_iqr'], plot_idr=DEFAULTS['plot_idr'],
divisor=DEFAULTS['divisor'], legend_side=DEFAULTS['legend_side']):
"""Create a plotly Figure object from a pandas DataFrame containing columns for
each line to plot, focused on the given date.
See the Click documentation on main() for information on the arguments not
listed below.
Arguments
---------
df_in: pandas DataFrame with an index starting at 0, representing days since
the first of the first month shown on the graph.
styles: a list of dicts that can be used for the 'line' value in a plotly
Scatter object. These can be used to customize the plotly properties
color, smoothing, dash, width, and shape.
"""
df = df_in.copy()
df = df.reset_index(drop=True)
data_list = []
# stdev region
if plot_stdev:
        s = '' if nstdevs == 1 else 's'
name = '± {n} Standard Deviation{s}'.format(n=nstdevs, s=s)
plots_stdev = _scatter_plots_envelope(df.climatology_lower, df.climatology_upper, name)
data_list.extend(plots_stdev)
# climatology mean line
if plot_mean:
name = '1981-2010 Average'
plot_mean = _scatter_plot_average(df.climatology, name)
data_list.append(plot_mean)
# interdecile region
if plot_idr:
name = 'Interdecile Range'
plots_idr = _scatter_plots_envelope(df['percentile_10'], df['percentile_90'],
name, fillcolor='rgba(229, 229, 229, 1)')
data_list.extend(plots_idr)
# interquartile region
if plot_iqr:
name = 'Interquartile Range'
plots_iqr = _scatter_plots_envelope(df['percentile_25'], df['percentile_75'],
name, fillcolor='rgba(206, 206, 206, 1)')
data_list.extend(plots_iqr)
# climatology median line
if plot_median:
name = '1981-2010 Median'
plot_median = _scatter_plot_average(df['percentile_50'], name)
data_list.append(plot_median)
# lines for all the years
plots_years = []
year_styles = copy.deepcopy(styles)
year_columns = [col for col in df.columns if re.match(YEAR_COLUMN_REGEX, col)]
for year in year_columns:
data_year = df[year]
try:
line_style = year_styles.pop(0)
except IndexError:
line_style = {}
plot_year = _scatter_plot_year(data_year, line_style=line_style)
plots_years.append(plot_year)
data_list.extend(plots_years)
layout = _layout(df, hemi, date, divisor, legend_side)
return go.Figure({'data': data_list, 'layout': layout})
def _month_ticks_and_annotations(dates):
month_nums = pd.Series(pd.to_datetime(dates.values).month)
annotations = []
tickvalues = []
for thing in month_nums.groupby(month_nums):
short_name = cal.month_abbr[thing[0]]
days = thing[1].index
day = days[int(len(days) / 2) - 1]
# name of the month
annotations.append({
'text': short_name,
'x': day,
'showarrow': False,
'yref': 'paper',
'yanchor': 'bottom',
'y': -.05,
'font': {
'size': 22
}})
# tick mark at the first of the month
tickvalues.append(days[0])
return tickvalues, annotations
def _y_range(df_in, hemi, min_range=10):
"""Return a list of length 2, containing a minimum and maximum value for the
y-axis. All columns in df_in must be columns that will be plotted.
"""
df = df_in.copy()
y_min = math.floor(df.min(axis=1).min())
y_max = math.ceil(df.max(axis=1).max())
if (y_max - y_min) < min_range:
y_min = y_max - min_range
y_min = max(y_min - 1, 0)
return [y_min, y_max]
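# Hedged toy check (illustrative only): _y_range() pads the lower bound and enforces a minimum
# span of 10 on the y-axis.
def _example_y_range():
    toy = pd.DataFrame({'a': [12.2, 13.0], 'b': [12.5, 13.4]})
    assert _y_range(toy, 'N') == [3, 14]   # span forced to >= 10, then lower bound minus 1
    return _y_range(toy, 'N')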
def _y_axis_title(divisor=DEFAULTS['divisor']):
descriptor = {
1e7: 'tens of millions of ',
1e6: 'millions of ',
1e5: 'hundreds of thousands of ',
1e4: 'tens of thousands of ',
1e3: 'thousands of ',
1e2: 'hundreds of ',
1e1: 'tens of ',
1e0: ''
}.get(divisor, 'x{} '.format(divisor))
title = 'Extent ({}square kilometers)'.format(descriptor)
return title
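# Hedged worked example (illustrative only): the divisor-to-description lookup above.
def _example_y_axis_title():
    assert _y_axis_title(1e6) == 'Extent (millions of square kilometers)'
    assert _y_axis_title(1e0) == 'Extent (square kilometers)'
    assert _y_axis_title(42) == 'Extent (x42 square kilometers)'
    return _y_axis_title(1e6)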
def _layout(df_in, hemi, date=DEFAULTS['date'], divisor=DEFAULTS['divisor'],
legend_side=DEFAULTS['legend_side']):
df = df_in.copy()
dates = df.date
start_of_months, month_annotations = _month_ticks_and_annotations(dates)
return {
'font': {
'color': 'rgba(0, 0, 0, 1)'
},
'margin': {
't': 126,
'r': 110,
'b': 84,
'l': 100
},
'xaxis': {
'showline': True,
'tickvals': start_of_months,
'ticks': 'inside',
'showticklabels': False,
'zeroline': False
},
'yaxis': {
'title': _y_axis_title(divisor),
'showline': True,
'titlefont': {
'size': 28,
},
'range': _y_range(df, hemi),
'tickfont': {
'size': 22
}
},
'title': ('{region} Sea Ice Extent<br>(Area of ocean with at least 15% sea ice)').format(
region={'N': 'Arctic', 'S': 'Antarctic'}[hemi]
),
'titlefont': {
'size': 35
},
'width': 1050,
'height': 840,
'annotations': month_annotations + [
{
'text': 'National Snow and Ice Data Center, University of Colorado Boulder',
'showarrow': False,
'textangle': 270,
'xref': 'paper',
'yref': 'paper',
'x': 1.03,
'y': 0,
'font': {
'size': 16
}
},
{
'text': date.strftime('%d %b %Y'),
'showarrow': False,
'xref': 'paper',
'yref': 'paper',
'x': 1,
'y': -.08,
'font': {
'size': 14
}
}
],
'showlegend': True,
'legend': _legend(df, legend_side)
}
def _legend(df_in, legend_side=DEFAULTS['legend_side']):
df = df_in.copy().drop('date', axis=1)
if legend_side == 'left':
xanchor = 'left'
x = 0
elif legend_side == 'right':
xanchor = 'right'
x = 1
else:
max_idx = df['climatology'].idxmax()
min_idx = df['climatology'].idxmin()
is_concave_up = (max_idx == 0) or (max_idx == (len(df) - 1))
local_extreme = min_idx if is_concave_up else max_idx
is_local_extreme_on_left = local_extreme < (len(df) / 2)
use_left_side = operator.xor(is_concave_up, is_local_extreme_on_left)
if use_left_side:
xanchor = 'left'
x = 0
else:
xanchor = 'right'
x = 1
return {
'xanchor': xanchor,
'yanchor': 'bottom',
'x': x,
'y': 0,
'bgcolor': 'rgba(0, 0, 0, 0)',
'font': {
'size': 22
}
}
def _shift_index_to_year(dt_index, target_date, new_year):
"""Return new datetime index for the new_year
given a dt_index we can shift it a number of years relative to the
target_date and new_year returning a datetime index aligned with the initial
dt_index, but for the target_year
"""
if target_date not in dt_index:
msg = 'Invalid target date {date}. Must be present in datetime index ({index})'.format(
date=target_date,
index=dt_index
)
raise SeaIceToolsRuntimeError(msg)
shifted_index_start = dt_index[0]
delta_years = target_date.year - new_year
shifted_index_start = shifted_index_start -
|
pd.DateOffset(years=delta_years)
|
pandas.DateOffset
|
"""dynamic user-input-responsive part of mood, and mood graphs"""
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import lsim, lti
from scipy.signal.ltisys import StateSpaceContinuous
from tqdm.autonotebook import tqdm
from IPython.display import display
from persistence.response_cache import (
ResponseCache,
UserInputIdentifier,
)
from feels.mood import (
random_mood_at_pst_datetime,
logit_diff_to_pos_sent,
pos_sent_to_logit_diff,
)
from util.past import MILESTONE_TIMES
from util.times import now_pst, fromtimestamp_pst
MOOD_IMAGE_DIR = "data/mood_images/"
STEP_SEC = 30 * 1
TAU_SEC = 3600 * 12
TAU_SEC_2ND = 60 * 60
WEIGHTED_AVG_START_TIME = pd.Timestamp("2021-01-04 09:10:00")
WEIGHTED_AVG_P75_WEIGHT = 0.5
RESPONSE_SCALE_BASE = 0.15 # 0.1 # 0.2 #0.5
DETERMINER_CENTER = -3.1 # -2.4 # -1.5 #-2
DETERMINER_CENTER_UPDATES = {
pd.Timestamp("2020-08-20 01:00:00"): -2.4,
pd.Timestamp("2020-08-25 14:00:00"): -2.0,
pd.Timestamp("2020-08-31 09:15:00"): -2.4,
pd.Timestamp("2020-09-16 06:00:00"): -2.1,
pd.Timestamp("2020-10-28 17:00:00"): -2.4,
pd.Timestamp("2020-11-04 11:00:00"): -2.78,
pd.Timestamp("2020-11-13 19:00:00"): -2.7,
pd.Timestamp("2020-11-15 07:30:00"): -2.6,
pd.Timestamp("2020-12-04 07:00:00"): -2.5,
pd.Timestamp("2020-12-10 08:35:00"): -2.35,
pd.Timestamp("2020-12-10 23:45:00"): -2.0,
pd.Timestamp("2020-12-18 15:35:00"): -2.2,
pd.Timestamp("2020-12-21 15:25:00"): -2.3,
WEIGHTED_AVG_START_TIME: 0.0,
pd.Timestamp("2021-02-08 09:25:00"): -0.25,
pd.Timestamp("2021-02-14 17:55:00"): -0.125,
pd.Timestamp("2021-02-15 17:25:00"): 0,
pd.Timestamp("2021-02-16 17:45:00"): 0.5,
pd.Timestamp("2021-02-17 12:45:00"): 0,
pd.Timestamp("2021-02-26 17:30:00"): 0.5,
pd.Timestamp("2021-02-27 16:05:00"): 0.,
pd.Timestamp("2021-03-15 09:55:00"): -0.2,
pd.Timestamp("2021-03-15 19:50:00"): -0.4,
pd.Timestamp("2021-03-20 06:55:00"): 0.,
pd.Timestamp("2021-03-24 22:40:00"): -0.3,
pd.Timestamp("2021-03-31 12:25:00"): -0.5,
pd.Timestamp("2021-04-09 07:10:00"): -0.25,
pd.Timestamp("2021-05-05 17:00:00"): 0.,
pd.Timestamp("2021-05-07 18:15:00"): -0.25,
pd.Timestamp("2021-05-12 07:50:00"): 0.,
pd.Timestamp("2021-05-22 09:50:00"): -0.125,
pd.Timestamp("2021-05-23 07:15:00"): -0.25,
pd.Timestamp("2021-06-05 12:05:00"): -0.5,
pd.Timestamp("2021-06-07 22:35:00"): -0.3,
pd.Timestamp("2021-06-08 13:15:00"): 0.,
pd.Timestamp("2021-06-14 06:55:00"): -0.25,
pd.Timestamp("2021-06-15 18:08:00"): 0.,
pd.Timestamp("2021-06-16 13:00:00"): 0.125,
pd.Timestamp("2021-06-26 07:35:00"): 0.25,
pd.Timestamp("2021-06-30 08:40:00"): 0.,
pd.Timestamp("2021-08-06 00:45:00"): -0.125,
pd.Timestamp("2021-09-21 08:25:00"): 0.,
pd.Timestamp("2021-09-22 17:45:00"): -0.075,
pd.Timestamp("2021-10-24 12:15:00"): -0.,
pd.Timestamp("2021-10-24 08:40:00"): 0.125,
pd.Timestamp("2021-10-25 17:55:00"): 0.25,
pd.Timestamp("2021-10-28 22:40:00"): 0.125,
pd.Timestamp("2021-10-31 18:10:00"): 0.05,
pd.Timestamp("2021-11-02 20:40:00"): 0.,
pd.Timestamp("2021-11-15 19:20:00"): 0.05,
pd.Timestamp("2021-11-17 09:10:00"): 0.1,
pd.Timestamp("2021-11-19 14:50:00"): 0.,
pd.Timestamp("2021-12-24 14:45:00"): 0.1,
pd.Timestamp("2021-12-30 09:55:00"): 0.05,
}
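# Hedged sketch (not part of the original module, and only an assumption about how the table is
# meant to be read): "use the most recent entry at or before a given timestamp", falling back to
# DETERMINER_CENTER before the first update.
def _example_determiner_center_at(ts):
    applicable = [value for when, value in sorted(DETERMINER_CENTER_UPDATES.items()) if when <= ts]
    return applicable[-1] if applicable else DETERMINER_CENTER

# e.g. _example_determiner_center_at(pd.Timestamp("2020-09-01")) would give -2.4 under that assumption.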
DETERMINER_MULTIPLIER_UPDATES = {
pd.Timestamp("2020-08-25 17:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-10-21 21:15:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-16 10:45:00"): 0.0667 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-25 11:30:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-27 08:55:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-04 07:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-09 19:50:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-20 23:30:00"): 0.05 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-08 08:55:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-08 09:10:00"): 0.1 / RESPONSE_SCALE_BASE,
|
pd.Timestamp("2021-01-13 09:20:00")
|
pandas.Timestamp
|
import numpy as np
import pandas as pd
from typing import Dict
from cascade_at.core.log import get_loggers
from cascade_at.dismod.api.fill_extract_helpers import utils, reference_tables
from cascade_at.dismod.constants import DensityEnum, IntegrandEnum, \
WeightEnum, MulCovEnum, RateEnum
from cascade_at.model.var import Var
from cascade_at.model.model import Model
LOG = get_loggers(__name__)
DEFAULT_DENSITY = ["uniform", 0, -np.inf, np.inf]
def construct_weight_grid_tables(weights: Dict[str, Var],
age_df, time_df) -> (pd.DataFrame, pd.DataFrame):
"""
    Constructs the weight and weight_grid tables.
Parameters
----------
weights
There are four kinds of weights:
"constant", "susceptible", "with_condition", and "total".
No other weights are used.
age_df
Age data frame from dismod db
time_df
Time data frame from dismod db
Returns
-------
Tuple of the weight table and the weight grid table
"""
LOG.info("Constructing weight and weight grid tables.")
names = [w.name for w in WeightEnum]
weight = pd.DataFrame({
'weight_id': [w.value for w in WeightEnum],
'weight_name': names,
'n_age': [len(weights[name].ages) for name in names],
'n_time': [len(weights[name].times) for name in names]
})
weight_grid = []
for w in WeightEnum:
LOG.info(f"Writing weight {w.name}.")
one_grid = weights[w.name].grid[["age", "time", "mean"]].rename(columns={"mean": "weight"})
one_grid["weight_id"] = w.value
weight_grid.append(one_grid)
weight_grid = pd.concat(weight_grid).reset_index(drop=True)
weight_grid = utils.convert_age_time_to_id(
df=weight_grid, age_df=age_df, time_df=time_df
)
weight_grid["weight_grid_id"] = weight_grid.index
return weight, weight_grid
def _add_prior_smooth_entries(grid_name, grid, num_existing_priors, num_existing_grids,
age_df, time_df):
"""
Adds prior smooth grid entries to the smooth grid table and any other tables
it needs to be added to. Called from inside of ``construct_model_tables`` only.
"""
age_count, time_count = (len(grid.ages), len(grid.times))
prior_df = grid.priors
assert len(prior_df) == (age_count * time_count + 1) * 3
# Get the densities for the priors
prior_df.loc[prior_df.density.isnull(), ["density", "mean", "lower", "upper"]] = DEFAULT_DENSITY
prior_df["density_id"] = prior_df["density"].apply(lambda x: DensityEnum[x].value)
prior_df["prior_id"] = prior_df.index + num_existing_priors
prior_df["assigned"] = prior_df.density.notna()
prior_df.rename(columns={"name": "prior_name"}, inplace=True)
# Assign names to each of the priors
null_names = prior_df.prior_name.isnull()
prior_df.loc[~null_names, "prior_name"] = (
prior_df.loc[~null_names, "prior_name"].astype(str) + " " +
prior_df.loc[~null_names, "prior_id"].astype(str)
)
prior_df.loc[null_names, "prior_name"] = prior_df.loc[null_names, "prior_id"].apply(
lambda pid: f"{grid_name}_{pid}"
)
# Convert to age and time ID for prior table
prior_df = utils.convert_age_time_to_id(
df=prior_df, age_df=age_df, time_df=time_df
)
# Create the simple smooth data frame
smooth_df = pd.DataFrame({
"smooth_name": [grid_name],
"n_age": [age_count],
"n_time": [time_count],
"mulstd_value_prior_id": [np.nan],
"mulstd_dage_prior_id": [np.nan],
"mulstd_dtime_prior_id": [np.nan]
})
# Create the grid entries
# TODO: Pass in the value prior ID instead from posterior to prior
long_table = prior_df.loc[prior_df.age_id.notna()][["age_id", "time_id", "prior_id", "kind"]]
grid_df = long_table[["age_id", "time_id"]].sort_values(["age_id", "time_id"]).drop_duplicates()
for kind in ["value", "dage", "dtime"]:
grid_values = long_table.loc[long_table.kind == kind].drop("kind", axis="columns")
grid_values.rename(columns={"prior_id": f"{kind}_prior_id"}, inplace=True)
grid_df = grid_df.merge(grid_values, on=["age_id", "time_id"])
grid_df = grid_df.sort_values(["age_id", "time_id"], axis=0).reindex()
grid_df["const_value"] = np.nan
grid_df["smooth_grid_id"] = grid_df.index + num_existing_grids
prior_df = prior_df[[
'prior_id', 'prior_name', 'lower', 'upper',
'mean', 'std', 'eta', 'nu', 'density_id'
]].sort_values(by='prior_id').reset_index(drop=True)
return prior_df, smooth_df, grid_df
def construct_subgroup_table() -> pd.DataFrame:
"""
Constructs the default subgroup table. If we want to actually
use the subgroup table, need to build this in.
"""
return pd.DataFrame.from_dict({
'subgroup_id': [0],
'subgroup_name': ['world'],
'group_id': [0],
'group_name': ['world']
})
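# Hedged illustration (not part of the original module): the default one-row subgroup table
# produced by the helper above.
def _example_subgroup_table():
    table = construct_subgroup_table()
    assert list(table.columns) == ['subgroup_id', 'subgroup_name', 'group_id', 'group_name']
    assert len(table) == 1
    return table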
def construct_model_tables(model: Model,
location_df: pd.DataFrame,
age_df: pd.DataFrame,
time_df: pd.DataFrame,
covariate_df: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""
Main function that loops through the items from a model object, which include
rate, random_effect, alpha, beta, and gamma and constructs the modeling tables in dismod db.
Each of these are "grid" vars, so they need entries in prior,
smooth, and smooth_grid. This function returns those tables.
It also constructs the rate, integrand, and mulcov tables (alpha, beta, gamma),
plus nslist and nslist_pair tables.
Parameters
----------
model
A model object that has rate information
location_df
A location / node data frame
age_df
An age data frame for dismod
time_df
A time data frame for dismod
covariate_df
A covariate data frame for dismod
Returns
-------
A dictionary of data frames for each table name, includes:
rate, prior, smooth, smooth_grid, mulcov, nslist, nslist_pair, and subgroup tables
"""
def compress_priors(rate_name, grid, prior, prior_id):
# Remove identical priors from the prior table, and remap the prior ids
prior_cols = ['value_prior_id', 'dage_prior_id', 'dtime_prior_id']
for col in prior_cols:
pids = grid[col].unique()
prior.loc[prior.prior_id.isin(pids), col] = True
cols = list(set(prior.columns) - set(['prior_id', 'prior_name'])) + prior_cols
grps = sorted(prior.fillna(-999).groupby(cols), key=lambda x: x[1].prior_id.min())
pid = [(prior_id + i, g.prior_id.min(), g.prior_id.unique()) for i,(k,g) in enumerate(grps)]
pmap = {v:k for k,v,ids in pid}
prior = prior.loc[prior.prior_id.isin(list(zip(*pid))[1])]
prior['prior_id'] = prior['prior_id'].replace(pmap)
prior['prior_name'] = [f'{rate_name}_{pid}' for pid in prior.prior_id]
for k,v,ids in pid:
for col in prior_cols:
grid.loc[grid[col].isin(ids), col] = k
prior.drop(columns = prior_cols, inplace=True)
return grid, prior
nslist = {}
smooth_table = pd.DataFrame()
prior_table =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
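# Illustrative note (not part of the original source): GroupByPlot backs
# ``df.groupby(...).plot`` (see ``plot = property(GroupByPlot)`` further down).
# Calling it directly goes through ``__call__``; accessing a plot kind goes
# through ``__getattr__``. Both apply the plot to each group via ``apply``:
#
#     df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
#     df.groupby("key").plot(kind="line")   # __call__ path
#     df.groupby("key").plot.line()         # __getattr__ path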
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation; sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
# need to set up the selection,
# as it is not passed directly but via the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
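    # Worked example (added for clarity, not in the original source): for group
    # ids ``[0, 0, 1, 1, 1]`` the ascending cumcount is ``[0, 1, 0, 1, 2]`` and
    # the descending cumcount is ``[1, 0, 2, 1, 0]``.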
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
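    # Illustrative example (not from the original source):
    #
    #     df = pd.DataFrame({"g": ["a", "a", "b"], "x": [True, False, True]})
    #     df.groupby("g")["x"].any()   # a: True,  b: True
    #     df.groupby("g")["x"].all()   # a: False, b: True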
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
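    # Illustrative example (not from the original source):
    #
    #     df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 2, 3]})
    #     df.groupby("g").size()
    #     # g
    #     # a    2
    #     # b    1
    #     # dtype: int64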
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
cls.sum = groupby_function("sum", "add", np.sum, min_count=0)
cls.prod = groupby_function("prod", "prod", np.prod, min_count=0)
cls.min = groupby_function("min", "min", np.min, numeric_only=False)
cls.max = groupby_function("max", "max", np.max, numeric_only=False)
cls.first = groupby_function("first", "first", first_compat, numeric_only=False)
cls.last = groupby_function("last", "last", last_compat, numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
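    # Illustrative example (not from the original source): one row per group,
    # with open/high/low/close columns computed from that group's values.
    #
    #     df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 3, 2]})
    #     df.groupby("g")["x"].ohlc()
    #     #    open  high  low  close
    #     # g
    #     # a     1     3    1      3
    #     # b     2     2    2      2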
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a string
"string" -> "frequency".
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
Upsample the series into 30 second bins.
>>> df.groupby('a').resample('30S').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:00:30 0 0
2000-01-01 00:01:00 0 1
2000-01-01 00:01:30 0 0
2000-01-01 00:02:00 0 0
2000-01-01 00:02:30 0 0
2000-01-01 00:03:00 0 1
5 2000-01-01 00:02:00 5 1
Resample by month. Values are assigned to the month of the period.
>>> df.groupby('a').resample('M').sum()
a b
a
0 2000-01-31 0 3
5 2000-01-31 5 1
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> df.groupby('a').resample('3T', closed='right').sum()
a b
a
0 1999-12-31 23:57:00 0 1
2000-01-01 00:00:00 0 2
5 2000-01-01 00:00:00 5 1
Downsample the series into 3 minute bins and close the right side of
the bin interval, but label each bin using the right edge instead of
the left.
>>> df.groupby('a').resample('3T', closed='right', label='right').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
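    # Illustrative example (not from the original source): rolling windows never
    # cross group boundaries, so each group restarts its own window.
    #
    #     df = pd.DataFrame({"g": ["a", "a", "a", "b"], "x": [1, 2, 3, 4]})
    #     df.groupby("g")["x"].rolling(2).sum()
    #     # -> [NaN, 3.0, 5.0, NaN], indexed by (group, original index)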
@Substitution(name="groupby")
@Appender(_common_see_also)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result(
"group_fillna_indexer",
needs_mask=True,
cython_dtype=np.dtype(np.int64),
result_is_index=True,
direction=direction,
limit=limit,
)
@Substitution(name="groupby")
def pad(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill("ffill", limit=limit)
ffill = pad
@Substitution(name="groupby")
def backfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill("bfill", limit=limit)
bfill = backfill
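    # Illustrative example (not from the original source): filling happens only
    # within a group, so group ``b`` below stays NaN because it has no earlier
    # value to propagate forward.
    #
    #     df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1.0, np.nan, np.nan]})
    #     df.groupby("g")["x"].ffill()    # -> [1.0, 1.0, NaN]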
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
'all' or 'any'; this is equivalent to calling dropna(how=dropna)
before the groupby.
Parameters
----------
n : int or list of ints
A single nth value for the row or a list of nth values.
dropna : None or str, optional
Apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'.
Returns
-------
Series or DataFrame
N-th value within each group.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying `dropna` allows count ignoring ``NaN``
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying `as_index=False` in `groupby` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
valid_containers = (set, list, tuple)
if not isinstance(n, (valid_containers, int)):
raise TypeError("n needs to be an int or a list/set/tuple of ints")
if not dropna:
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, valid_containers):
nth_values = list(set(n))
nth_array = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
mask = mask_left | mask_right
ids, _, _ = self.grouper.group_info
# Drop NA values in grouping
mask = mask & (ids != -1)
out = self._selected_obj[mask]
if not self.as_index:
return out
result_index = self.grouper.result_index
out.index = result_index[ids[mask]]
if not self.observed and isinstance(result_index, CategoricalIndex):
out = out.reindex(result_index)
out = self._reindex_output(out)
return out.sort_index() if self.sort else out
# dropna is truthy
if isinstance(n, valid_containers):
raise ValueError("dropna option with a list of nth values is not supported")
if dropna not in ["any", "all"]:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
f"(was passed {dropna})."
)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else -1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on dropped
# object
from pandas.core.groupby.grouper import get_grouper
grouper, _, _ = get_grouper(
dropped,
key=self.keys,
axis=self.axis,
level=self.level,
sort=self.sort,
mutated=self.mutated,
)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(
self.grouper.result_index
):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def quantile(self, q=0.5, interpolation: str = "linear"):
"""
Return group values at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value(s) between 0 and 1 providing the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Method to use when the desired quantile falls between two points.
Returns
-------
Series or DataFrame
Return type determined by caller of GroupBy object.
See Also
--------
Series.quantile : Similar method for Series.
DataFrame.quantile : Similar method for DataFrame.
numpy.percentile : NumPy method to compute qth percentile.
Examples
--------
>>> df = pd.DataFrame([
... ['a', 1], ['a', 2], ['a', 3],
... ['b', 1], ['b', 3], ['b', 5]
... ], columns=['key', 'val'])
>>> df.groupby('key').quantile()
val
key
a 2.0
b 3.0
"""
from pandas import concat
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
if is_object_dtype(vals):
raise TypeError(
"'quantile' cannot be performed against 'object' dtypes!"
)
inference = None
if is_integer_dtype(vals.dtype):
if is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
inference = np.int64
elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
elif is_datetime64_dtype(vals.dtype):
inference = "datetime64[ns]"
vals = np.asarray(vals).astype(np.float)
return vals, inference
def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
if inference:
# Check for edge case
if not (
is_integer_dtype(inference)
and interpolation in {"linear", "midpoint"}
):
vals = vals.astype(inference)
return vals
if is_scalar(q):
return self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=q,
interpolation=interpolation,
)
else:
results = [
self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=qi,
interpolation=interpolation,
)
for qi in q
]
result = concat(results, axis=0, keys=q)
# fix levels to place quantiles on the inside
# TODO(GH-10710): Ideally, we could write this as
# >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
# but this hits https://github.com/pandas-dev/pandas/issues/10710
# which doesn't reorder the list-like `q` on the inner level.
order = list(range(1, result.index.nlevels)) + [0]
# temporarily saves the index names
index_names = np.array(result.index.names)
# set index names to positions to avoid confusion
result.index.names = np.arange(len(index_names))
# place quantiles on the inside
result = result.reorder_levels(order)
# restore the index names in order
result.index.names = index_names[order]
# reorder rows to keep things sorted
indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
return result.take(indices)
@Substitution(name="groupby")
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name="groupby")
def cumcount(self, ascending: bool = True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
See Also
--------
.ngroup : Number the groups themselves.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rank(
self,
method: str = "average",
ascending: bool = True,
na_option: str = "keep",
pct: bool = False,
axis: int = 0,
):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group.
* min: lowest rank in group.
* max: highest rank in group.
* first: ranks assigned in order they appear in the array.
* dense: like 'min', but rank always increases by 1 between groups.
ascending : bool, default True
False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are.
* top: smallest rank if ascending.
* bottom: smallest rank if descending.
pct : bool, default False
Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
-------
DataFrame with ranking of values within each group
"""
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform(
"rank",
numeric_only=False,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
axis=axis,
)
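    # Illustrative example (not from the original source), using the default
    # ``method='average'``; ranks are computed within each group.
    #
    #     df = pd.DataFrame({"g": ["a", "a", "a", "b"], "x": [3, 1, 2, 5]})
    #     df.groupby("g")["x"].rank()     # -> [3.0, 1.0, 2.0, 1.0]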
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform("cumprod", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform("cumsum", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummax(self, axis=0, **kwargs):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", numeric_only=False)
def _get_cythonized_result(
self,
how: str,
cython_dtype: np.dtype,
aggregate: bool = False,
needs_values: bool = False,
needs_mask: bool = False,
needs_ngroups: bool = False,
result_is_index: bool = False,
pre_processing=None,
post_processing=None,
**kwargs,
):
"""
Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
cython_dtype : np.dtype
Type of the array that will be modified by the Cython call.
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. Raises if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
if post_processing:
if not callable(pre_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError(
"Cannot use 'pre_processing' without specifying 'needs_values'!"
)
grouper = self.grouper
labels, _, ngroups = grouper.group_info
output: Dict[base.OutputKey, np.ndarray] = {}
base_func = getattr(libgroupby, how)
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
values = obj._values
if aggregate:
result_sz = ngroups
else:
result_sz = len(values)
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
inferences = None
if needs_values:
vals = values
if pre_processing:
vals, inferences = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isna(values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(values, result)
if post_processing:
result = post_processing(result, inferences)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output)
@Substitution(name="groupby")
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by periods observations.
If freq is passed, the index will be increased using the periods and the freq.
Parameters
----------
periods : int, default 1
Number of periods to shift.
freq : str, optional
Frequency string.
axis : axis to shift, default 0
Shift direction.
fill_value : optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Object shifted within each group.
See Also
--------
Index.shift : Shift values of Index.
        tshift : Shift the time index, using the index's frequency
if available.
"""
if freq is not None or axis != 0 or not isna(fill_value):
return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
return self._get_cythonized_result(
"group_shift_indexer",
cython_dtype=np.dtype(np.int64),
needs_ngroups=True,
result_is_index=True,
periods=periods,
)
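    # A minimal, hypothetical doctest-style example of the group-wise shift above:
    #
    #     >>> df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
    #     >>> df.groupby("g")["v"].shift(1)
    #     0    NaN
    #     1    1.0
    #     2    NaN
    #     Name: v, dtype: float64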
@Substitution(name="groupby")
@Appender(_common_see_also)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0):
"""
Calculate pct_change of each value to previous entry in group.
Returns
-------
Series or DataFrame
Percentage changes within each group.
"""
if freq is not None or axis != 0:
return self.apply(
lambda x: x.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
axis=axis,
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
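    # A minimal, hypothetical example of the group-wise pct_change above (each value is
    # compared to the previous entry within its own group):
    #
    #     >>> df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
    #     >>> df.groupby("g")["v"].pct_change()
    #     0    NaN
    #     1    1.0
    #     2    NaN
    #     Name: v, dtype: float64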
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def head(self, n=5):
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name="groupby")
@
|
Substitution(see_also=_common_see_also)
|
pandas.util._decorators.Substitution
|
import os
import sys
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
import numpy as np
import pickle
import random
import json
from collections import OrderedDict
import itertools as it
import pathos.multiprocessing as mp
import pandas as pd
from matplotlib import pyplot as plt
from src.algorithms.mcts import MCTS, ScoreChild, establishSoftmaxActionDist, SelectChild, backup, InitializeChildren, Expand, RollOut
import src.constrainedChasingEscapingEnv.envNoPhysics as env
import src.constrainedChasingEscapingEnv.reward as reward
from src.constrainedChasingEscapingEnv.policies import HeatSeekingContinuesDeterministicPolicy, HeatSeekingDiscreteDeterministicPolicy, stationaryAgentPolicy
from src.constrainedChasingEscapingEnv.state import GetAgentPosFromState
from src.constrainedChasingEscapingEnv.analyticGeometryFunctions import computeAngleBetweenVectors
from src.episode import chooseGreedyAction, SampleTrajectory
from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, LoadTrajectories, SaveAllTrajectories, \
GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle
from exec.preProcessing import AccumulateRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, PreProcessTrajectories
from src.constrainedChasingEscapingEnv.envNoPhysics import IsTerminal, TransiteForNoPhysics, Reset, StayInBoundaryByReflectVelocity
from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, \
ApproximatePolicy, restoreVariables
import time
from exec.trajectoriesSaveLoad import GetSavePath, saveToPickle
from exec.evaluationFunctions import ComputeStatistics
class SampleTrajectoryFixRet:
def __init__(self, maxRunningSteps, transit, isTerminal, reset, chooseAction):
self.maxRunningSteps = maxRunningSteps
self.transit = transit
self.isTerminal = isTerminal
self.reset = reset
self.chooseAction = chooseAction
def __call__(self, policy, trialIndex):
state = self.reset(trialIndex)
while self.isTerminal(state):
state = self.reset(trialIndex)
trajectory = []
for runningStep in range(self.maxRunningSteps):
if self.isTerminal(state):
trajectory.append((state, None, None))
break
actionDists = policy(state)
action = [self.chooseAction(actionDist) for actionDist in actionDists]
trajectory.append((state, action, actionDists))
actionFortransit = [action[0], action[1][0], action[1][1]]
nextState = self.transit(state, actionFortransit)
state = nextState
return trajectory
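# Each trajectory returned above is a list of (state, action, actionDists) tuples, with a
# terminal state appended as (state, None, None). A hypothetical call, mirroring the
# usage further down in generateOneCondition:
#
#     sampler = SampleTrajectoryFixRet(maxRunningSteps, transit, isTerminal, reset, chooseGreedyAction)
#     trajectory = sampler(policy, 0)   # the trial index selects the fixed initial positions
#     states = [step[0] for step in trajectory]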
def generateOneCondition(parameters):
print(parameters)
numTrials = 7
numSimulations = int(parameters['numSimulations'])
killzoneRadius = 30
maxRunningSteps = 100
fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius, 'numTrials': numTrials}
trajectorySaveExtension = '.pickle'
dirName = os.path.dirname(__file__)
trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', '..', 'data', 'evaluateEscapeSingleChasingNoPhysics', 'evaluateMCTSTBaseLineTajectories')
if not os.path.exists(trajectoriesSaveDirectory):
os.makedirs(trajectoriesSaveDirectory)
generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters)
trajectorySavePath = generateTrajectorySavePath(parameters)
numOfAgent = 3
sheepId = 0
wolvesId = 1
wolfOneId = 1
wolfTwoId = 2
xPosIndex = [0, 1]
xBoundary = [0, 600]
yBoundary = [0, 600]
getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
getWolfOneXPos = GetAgentPosFromState(wolfOneId, xPosIndex)
getWolfTwoXPos = GetAgentPosFromState(wolfTwoId, xPosIndex)
isTerminalOne = IsTerminal(getWolfOneXPos, getSheepXPos, killzoneRadius)
isTerminalTwo = IsTerminal(getWolfTwoXPos, getSheepXPos, killzoneRadius)
isTerminal = lambda state: isTerminalOne(state) or isTerminalTwo(state)
stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(xBoundary, yBoundary)
transit = TransiteForNoPhysics(stayInBoundaryByReflectVelocity)
actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
preyPowerRatio = 3
sheepActionSpace = list(map(tuple, np.array(actionSpace) * preyPowerRatio))
predatorPowerRatio = 2
wolfActionOneSpace = list(map(tuple, np.array(actionSpace) * predatorPowerRatio))
wolfActionTwoSpace = list(map(tuple, np.array(actionSpace) * predatorPowerRatio))
wolvesActionSpace = list(it.product(wolfActionOneSpace, wolfActionTwoSpace))
numSheepActionSpace = len(sheepActionSpace)
numWolvesActionSpace = len(wolvesActionSpace)
numStateSpace = 6
regularizationFactor = 1e-4
sharedWidths = [128]
actionLayerWidths = [128]
valueLayerWidths = [128]
generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace, regularizationFactor)
# load save dir
NNModelSaveExtension = ''
NNModelSaveDirectory = os.path.join(dirName, '..', '..', '..', 'data', 'evaluateEscapeMultiChasingNoPhysics', 'trainedResNNModelsMultiStillAction')
NNModelFixedParameters = {'agentId': 0, 'maxRunningSteps': 150, 'numSimulations': 200, 'miniBatchSize': 256, 'learningRate': 0.0001, }
getNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, NNModelFixedParameters)
depth = 5
resBlockSize = 2
dropoutRate = 0.0
initializationMethod = 'uniform'
initSheepNNModel = generateSheepModel(sharedWidths * depth, actionLayerWidths, valueLayerWidths, resBlockSize, initializationMethod, dropoutRate)
sheepTrainedModelPath = getNNModelSavePath({'trainSteps': 50000, 'depth': depth})
sheepTrainedModel = restoreVariables(initSheepNNModel, sheepTrainedModelPath)
sheepPolicy = ApproximatePolicy(sheepTrainedModel, sheepActionSpace)
# sheepPolicy = lambda state: {action: 1 / len(sheepActionSpace) for action in sheepActionSpace}
# select child
cInit = 1
cBase = 100
calculateScore = ScoreChild(cInit, cBase)
selectChild = SelectChild(calculateScore)
# prior
getActionPrior = lambda state: {action: 1 / len(wolvesActionSpace) for action in wolvesActionSpace}
# load chase nn policy
    def wolvesTransit(state, action):
        return transit(state, [chooseGreedyAction(sheepPolicy(state)), action[0], action[1]])
# reward function
aliveBonus = -1 / maxRunningSteps
deathPenalty = 1
rewardFunction = reward.RewardFunctionCompete(
aliveBonus, deathPenalty, isTerminal)
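    # With maxRunningSteps = 100 this is a per-step term of -1/100 = -0.01 and a terminal
    # term of +1 once isTerminal fires (either wolf within killzoneRadius of the sheep).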
# initialize children; expand
initializeChildren = InitializeChildren(
wolvesActionSpace, wolvesTransit, getActionPrior)
expand = Expand(isTerminal, initializeChildren)
# random rollout policy
    def rolloutPolicy(state):
        return wolvesActionSpace[np.random.choice(range(numWolvesActionSpace))]
# rollout
rolloutHeuristicWeight = 0.1
rolloutHeuristic1 = reward.HeuristicDistanceToTarget(
rolloutHeuristicWeight, getWolfOneXPos, getSheepXPos)
rolloutHeuristic2 = reward.HeuristicDistanceToTarget(
rolloutHeuristicWeight, getWolfTwoXPos, getSheepXPos)
rolloutHeuristic = lambda state: (rolloutHeuristic1(state) + rolloutHeuristic2(state)) / 2
maxRolloutSteps = 10
rollout = RollOut(rolloutPolicy, maxRolloutSteps, wolvesTransit, rewardFunction, isTerminal, rolloutHeuristic)
wolfPolicy = MCTS(numSimulations, selectChild, expand, rollout, backup, establishSoftmaxActionDist)
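    # wolfPolicy assembles the MCTS loop from the pieces above: selectChild descends the
    # tree using the ScoreChild score (cInit=1, cBase=100), expand adds children over the
    # joint two-wolf action space, rollout evaluates leaves with the heuristic-augmented
    # random rollout, backup propagates the estimates, and establishSoftmaxActionDist
    # turns the root statistics into the returned action distribution.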
# All agents' policies
policy = lambda state: [sheepPolicy(state), wolfPolicy(state)]
np.random.seed(1447)
initPositionList = [[env.samplePosition(xBoundary, yBoundary) for j in range(numOfAgent)] for i in range(numTrials)]
reset = env.FixedReset(initPositionList)
sampleTrajectory = SampleTrajectoryFixRet(maxRunningSteps, transit, isTerminal, reset, chooseGreedyAction)
startTime = time.time()
trajectories = [sampleTrajectory(policy, trial) for trial in range(numTrials)]
    finishedTime = time.time() - startTime
    saveToPickle(trajectories, trajectorySavePath)
    print(parameters)
    print('length:', np.mean([len(tra) for tra in trajectories]))
    print('timeTaken:', finishedTime)
def main():
manipulatedVariables = OrderedDict()
    manipulatedVariables['numSimulations'] = [50, 100, 200, 400]
levelNames = list(manipulatedVariables.keys())
levelValues = list(manipulatedVariables.values())
modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
toSplitFrame =
|
pd.DataFrame(index=modelIndex)
|
pandas.DataFrame
|
"""
@author: <NAME>
Based on the Kaggle Dataset for fraud detection using transaction data
The following a Tensorflow-based program using a multi-layered neural network
to classify fraudulent transactions.
"""
import tensorflow as tf
import numpy as np
import pandas as pd
tf.logging.set_verbosity(tf.logging.INFO)
########################################################
###### Read in Files ######
########################################################
print("Reading in files...")
train_data_csv = 'creditcard_train.csv'
test_data_csv = 'creditcard_test.csv'
train_data = pd.read_csv(train_data_csv) # DataFrame object
trainX = train_data.drop(['Class'], axis='columns')
trainY = train_data.Class
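# Class is the binary fraud label; one-hot encoding it (e.g. via pd.get_dummies) yields an
# (n_samples, 2) array, the usual target shape for a softmax output layer.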
train_encoded_labels = np.array(
|
pd.get_dummies(trainY)
|
pandas.get_dummies
|
# 2021-04-22 09:00
# elihei [<<EMAIL>>]
# /Volumes/Projects/MS_lesions/analysis/sma02_novosparc_run.py
import argparse
import json
import os
import sys
import numpy as np
import pandas as pd
import scanpy as sc
import novosparc
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist, squareform, pdist
from scipy.stats import ks_2samp
# currentdir = os.path.dirname(os.path.abspath(__file__))
# parentdir = os.path.dirname(currentdir)
# sys.path.insert(0,parentdir)
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/sma02_novosparc/', help='Data directory containing the expression matrix and the atlas.')
parser.add_argument('--out_dir', type=str, default='output/sma02_novosparc/', help='Output directory.')
parser.add_argument('--tag', type=str, default='EXP', help='The tag of the experiment (for saving).')
parser.add_argument('--atlas_locs_f', type=str, default='locs_atlas.txt', help='Path to the atlas locations file in the data directory. Should be in txt format.')
parser.add_argument('--atlas_mtx_f', type=str, default='mtx_atlas.txt', help='Path to the atlas marker expression matrix in the data directory. Should be in txt format.')
parser.add_argument('--expr_mtx_f', type=str, default='mtx_expr.txt', help='Path to the main expression matrix to be mapped to the atlas locations.')
parser.add_argument('--ncells', type=int, default=1000, help='Number of cells to be subsampled from the expression dataset.')
parser.add_argument('--nns', type=int, default=5, help='Num neighbors for cell-cell expression cost.')
parser.add_argument('--nnt', type=int, default=5, help='Num neighbors for location-location physical distance cost')
parser.add_argument('--alpha', type=float, default=0.8, help='The weight of the reference atlas.')
parser.add_argument('--epsilon', type=float, default=0.0005, help='Coefficient of entropy regularization')
parser.add_argument('--seed', type=int, default=0, help='Seed for generating the synthetic dataset.')
args = parser.parse_args()
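# Example invocation (hypothetical paths, using the defaults declared above):
#
#     python sma02_novosparc_run.py --data_dir data/sma02_novosparc/ \
#         --out_dir output/sma02_novosparc/ --tag EXP --ncells 1000 --alpha 0.8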
target_space_path = os.path.join(args.data_dir, args.atlas_locs_f)
locations =
|
pd.read_csv(target_space_path, sep=',')
|
pandas.read_csv
|
#!/usr/bin/env python3
## Copyright (c) 2020 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
## Distributed under the terms of the MIT license.
from dataclasses import dataclass
import math
import numpy as np
import scipy.integrate as integrate
def compute_V(bmb: float,
cmb: float,
omega: float):
wc = cmb
if omega <= wc:
return bmb / cmb * omega
else:
# helper constant:
a1 = np.sqrt(omega**2 - wc**2)
a2 = np.arctan(wc / a1)
period = 2 * np.pi / a1
def integrand(t):
theta = 2 * np.arctan(wc / omega - a1 / omega * np.tan(0.5 * a1 * t - a2))
return np.sin(theta)
return bmb * integrate.quad(integrand, 0, period)[0] / period
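# Quick sanity check (hypothetical numbers): below the step-out frequency wc = cmb the
# response is linear, so compute_V(bmb=1.0, cmb=1.0, omega=0.5) returns 0.5; above wc the
# returned value is the period-average of bmb * sin(theta(t)) computed by quadrature.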
@dataclass
class ABF:
# parameters of the shape
bmb: float
cmb: float
def velocity(self, omega: float):
return compute_V(self.bmb, self.cmb, omega)
def stepOutFrequency(self):
return self.cmb
class Swimmers:
def __init__(self):
self.dt = 1
self.t_max = 200
self.target_radius = 1
self.ABFs = [ABF(bmb=1, cmb=1), ABF(bmb=1, cmb=2)]
self.reset()
def reset(self):
self.step = 0
self.t = 0
self.positions = np.array([[15., 10.], [-5., 15.]])
self.prevDistance = self.getSumDistances()
self.trajectory = list() # trace for dumping
def isSuccess(self):
        distances = np.sqrt(np.sum(self.positions**2, axis=1))
        return np.max(distances) < self.target_radius
def isOver(self):
return self.isSuccess() or self.t >= self.t_max
def system(self, t, y, act):
wx, wy, omega = act
Vs = [a.velocity(omega) for a in self.ABFs]
u = np.array([wx, wy]) / np.sqrt(wx**2 + wy**2)
return np.array([ v * u for v in Vs])
def getSumDistances(self):
return np.sum(np.sqrt(np.sum(self.positions**2, axis=1)))
def advance(self, action):
self.prevDistance = self.getSumDistances()
self.action = np.array(action)
self.positions += self.dt * self.system(self.t, self.positions, action) # Forward Euler is exact here
self.t += self.dt
self.step += 1
self.trajectory.append(self.positions.flatten())
return self.isOver()
def getState(self):
return self.positions.flatten()
def getReward(self):
currDistance = self.getSumDistances()
r = -self.dt / self.t_max
r += self.prevDistance - currDistance # reward shaping
if self.isSuccess():
r += 10
return r
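    # Note on the shaping term in getReward above: summed over an episode,
    # (prevDistance - currDistance) telescopes to the total reduction in summed distance
    # to the origin, so it rewards net progress on top of the time penalty and the +10
    # success bonus.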
def getRemainingTime(self):
return self.t_max - self.t
def dumpTrajectoryToCsv(self, fname: str):
import pandas as pd
traj = np.array(self.trajectory)
data = dict()
for i in range(len(self.ABFs)):
data[f"x{i}"] = traj[:,2*i]
data[f"y{i}"] = traj[:,2*i+1]
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 18:09:41 2019
@author: shuxinyu
"""
import numpy as np
import pandas as pd
import random
from sklearn.feature_selection import chi2, VarianceThreshold, SelectKBest
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
AUC_record=pd.DataFrame()
for round_num in range(10):
result_concat=pd.DataFrame()
for test_num in [1,2,3,4]:
train_df = pd.read_csv('/home/liukang/Doc/valid_df/train_{}.csv'.format(test_num))
test_df = pd.read_csv('/home/liukang/Doc/valid_df/test_{}.csv'.format(test_num))
Lr = LogisticRegression(n_jobs=-1)
X_train, y_train = train_df.loc[:, :'CCS279'], train_df['Label']
X_test, y_test = test_df.loc[:, :'CCS279'], test_df['Label']
Lr.fit(X_train, y_train)
Wi = pd.DataFrame({'col_name':X_train.columns.tolist(), 'Feature_importance':Lr.coef_[0]})
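        # Wi pairs every input column (up to 'CCS279') with its fitted logistic-regression
        # coefficient, i.e. a simple signed importance score for that feature.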
train_data=train_df
train_data=train_data.sample(frac=1)
train_data.reset_index(drop=True,inplace=True)
test_data=test_df
test_data=test_data.sample(frac=1)
test_data.reset_index(drop=True,inplace=True)
train_X,train_y=train_data.loc[:, :'CCS279'], train_data['Label']
test_X,y_test=test_data.loc[:, :'CCS279'], test_data['Label']
test_y=
|
pd.DataFrame()
|
pandas.DataFrame
|