| prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars) |
|---|---|---|
# to estimate flood control volume from ReGeom data
from datetime import datetime
from datetime import date
import os
import numpy as np
import pandas as pd
import sys
from dateutil.relativedelta import relativedelta
print(os.path.basename(__file__))
##### initial setting ------------------------------
tag = sys.argv[1]
dam_file = './'+tag+'/damloc_modified.csv'
## link
GRSADdir = "./inp/GRSAD/"
ReGeomdir = "./inp/ReGeom/"
ReGeom_ErrorFile = "./inp/ReGeom_Error.csv"
output_file = './'+tag+'/tmp_p03_fldsto.csv'
#### parameters to calculate flood control volume
pc = 75 ## percentile of surface area timeseries
s_yr, s_mon = 1984, 3
e_yr, e_mon = 2018, 12
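# Note: `pc` is applied later to each dam's surface-area time series (that step is
# not shown in this excerpt). As an illustrative sketch only, with a hypothetical
# pandas Series `area_series`, the 75th-percentile area would be:
#   area_p75 = area_series.quantile(pc / 100.0)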
#### read database --------------------------
grand = pd.read_csv(dam_file)
error = pd.read_csv(ReGeom_ErrorFile)
#### dam loop -----------------------------
cols = ['damid', 'damname', 'ave_area', 'fldsto_mcm', 'totalsto_mcm']
df_new = pd.DataFrame(index=[], columns=cols)
for i in range(len(grand)):
gr = grand.iloc[i:i+1]
nm = gr['damid'].values[0]
damname = gr['damname'].values[0]
totalsto = gr['totalsto_mcm'].values[0]
print('')
print('------')
print(nm, damname)
#if nm > 6820:
# continue
error_i = error.query('GRAND_ID == @nm')
## read timeseries file -----
grsadpath = GRSADdir + '/'+ str(nm) + '_intp'
if not os.path.isfile(grsadpath):
print('file not found: ' +str(grsadpath))
df_i = [nm, damname, np.nan, np.nan, totalsto]
df_i = pd.Series(df_i, index=df_new.columns)
df_new = df_new.append(df_i, ignore_index=True)
continue
import pandas as pd
df =
|
pd.read_table(grsadpath, index_col=0, parse_dates=True)
|
pandas.read_table
|
import pandas as pd
import omdb
from time import sleep
import re
API_KEY = '4d93e7c3'
omdb.set_default('apikey', API_KEY)
omdb.set_default('timeout', 2)
def create_new_features(feature, number_of_new_features, dataset):
mylist = dataset[feature].split(',')
dictionary = {}
for i in range(1,number_of_new_features+1):
if (number_of_new_features!=1):
key = feature+str(i)
else:
key = feature
if (i<=len(mylist)):
mylist[i-1] = re.sub(r'\([^)]*\)', '', mylist[i-1])
dictionary[key] = mylist[i-1].strip()
else:
dictionary[key] = 'N/A'
return (dictionary)
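# Illustrative example (made-up input, not part of the original script):
#   create_new_features('actor', 2, {'actor': 'Tom Hanks (voice), Tim Allen (voice), Don Rickles'})
#   -> {'actor1': 'Tom Hanks', 'actor2': 'Tim Allen'}
# Parenthesised notes are stripped and missing entries are filled with 'N/A'.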
def prepare_dataset(dataset):
new_dataset = []
for movie in dataset:
movie_with_new_features = {}
movie['actor'] = movie['actors']
del movie['actors']
# create some new features
movie_with_new_features.update(create_new_features('actor', 2, movie))
movie_with_new_features.update(create_new_features('writer', 1, movie))
movie_with_new_features.update(create_new_features('director', 1, movie))
movie_with_new_features.update(create_new_features('genre', 1, movie))
movie_with_new_features.update(create_new_features('language', 1, movie))
movie_with_new_features.update(create_new_features('country', 1, movie))
# create target variable based on imdb rating
if (float(movie['imdb_rating']) > 5.0):
movie_with_new_features['Success_of_Movie'] = 'Success'
else:
movie_with_new_features['Success_of_Movie'] = 'Failure'
#copy some features
if (movie['released'] != 'N/A'):
movie_with_new_features['released'] = movie['released'].split()[2]
else:
movie_with_new_features['released'] = movie['released']
if (movie['runtime'] != 'N/A'):
movie_with_new_features['runtime'] = movie['runtime'].split()[0]
else:
movie_with_new_features['runtime'] = movie['runtime']
movie_with_new_features['id'] = movie['imdb_id']
new_dataset.append(movie_with_new_features)
return(new_dataset)
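# Illustrative note (assuming OMDb returns 'released' like '14 Oct 1994' and
# 'runtime' like '142 min'): split()[2] keeps only the year ('1994') and
# split()[0] keeps only the number of minutes ('142').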
if __name__ == "__main__":
df_2006_2016 = pd.read_csv("id_2006_2016.csv", usecols=["tconst"])
df_2017 =
|
pd.read_csv("id_2017.csv", usecols=["tconst"])
|
pandas.read_csv
|
from django.test import TestCase
import pandas as pd
import numpy as np
import pickle
import django
from .models import (
DataFrame, WideTimeSeries, WideTimeSeriesDateField,
LongTimeSeries, PivotData, Dude, Car, Spot
)
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm
import semver
PANDAS_VERSIONINFO = semver.VersionInfo.parse(pd.__version__)
class DataFrameTest(TestCase):
def setUp(self):
data = {
'col1': np.array([1, 2, 3, 5, 6, 5, 5]),
'col2': np.array([10.0, 2.4, 3.0, 5, 6, 5, 5]),
'col3': np.array([9.5, 2.4, 3.0, 5, 6, 7.5, 2.5]),
'col4': np.array([9, 2, 3, 5, 6, 7, 2]),
}
index = pd.Index(['a', 'b', 'c', 'd', 'e', 'f', 'h'])
self.df =
|
pd.DataFrame(index=index, data=data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from .constants import *
from . import utils
def extract_base_intensity(maxed, peaks, cells, threshold_peaks):
"""Extract values from `maxed` where `peaks` exceeds `threshold_peaks`. Also extract
cell label at read position from `cells` and i,j positions of reads.
"""
# reads outside of cells get label 0
read_mask = (peaks > threshold_peaks)
values = maxed[:, :, read_mask].transpose([2, 0, 1])
labels = cells[read_mask]
positions = np.array(np.where(read_mask)).T
return values, labels, positions
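# Shape sketch (toy sizes, for illustration only): if `maxed` has shape
# (cycles, channels, I, J) and `read_mask` selects R pixels, then `values` is
# (R, cycles, channels), `labels` is (R,) and `positions` is (R, 2).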
def format_bases(values, labels, positions, cycles, bases):
"""Arrange ?x?x? arrays of base intensity information into a dataframe in "long" format (one
row per observation).
"""
index = (CYCLE, cycles), (CHANNEL, bases)
try:
df = utils.ndarray_to_dataframe(values, index)
except ValueError:
print('failed to reshape extracted pixels to sequencing bases, writing empty table')
return pd.DataFrame()
df_positions = pd.DataFrame(positions, columns=[POSITION_I, POSITION_J])
df = (df.stack([CYCLE, CHANNEL])
.reset_index()
.rename(columns={0: INTENSITY, 'level_0': READ})
.join(
|
pd.Series(labels, name=CELL)
|
pandas.Series
|
import logging
from io import BytesIO
from pathlib import Path
from typing import List, Union, Generator, Optional, Tuple
from datetime import datetime
import pandas as pd
from pandas import Timestamp
import dateparser
from wetterdienst.data_collection import (
collect_climate_observations_data,
collect_radolan_data,
)
from wetterdienst.enumerations.parameter_enumeration import Parameter
from wetterdienst.enumerations.period_type_enumeration import PeriodType
from wetterdienst.enumerations.time_resolution_enumeration import TimeResolution
from wetterdienst.additionals.functions import (
cast_to_list,
parse_enumeration_from_template,
)
from wetterdienst.exceptions import InvalidParameterCombination, StartDateEndDateError
from wetterdienst.constants.metadata import DWD_FOLDER_MAIN
from wetterdienst.enumerations.column_names_enumeration import DWDMetaColumns
from wetterdienst.indexing.file_index_creation import (
reset_file_index_cache,
create_file_index_for_radolan,
)
log = logging.getLogger(__name__)
class DWDStationRequest:
"""
The DWDStationRequest class represents a request for station data as provided by the
DWD service
"""
def __init__(
self,
station_ids: Union[str, int, List[Union[int, str]]],
parameter: Union[str, Parameter, List[Union[str, Parameter]]],
time_resolution: Union[str, TimeResolution],
period_type: Union[
Union[None, str, PeriodType], List[Union[None, str, PeriodType]]
] = None,
start_date: Union[None, str, Timestamp] = None,
end_date: Union[None, str, Timestamp] = None,
prefer_local: bool = False,
write_file: bool = False,
folder: Union[str, Path] = DWD_FOLDER_MAIN,
tidy_data: bool = True,
humanize_column_names: bool = False,
create_new_file_index: bool = False,
) -> None:
"""
Class with mostly flexible arguments to define a request regarding DWD data.
Special handling for period type. If start_date/end_date are given all period
types are considered and merged together and the data is filtered for the given
dates afterwards.
Args:
station_ids: definition of stations by str, int or list of str/int,
will be parsed to list of int
parameter: str or parameter enumeration defining the requested parameter
time_resolution: str or time resolution enumeration defining the requested
time resolution
period_type: str or period type enumeration defining the requested
period type
start_date: replacement for period type to define exact time of
requested data
end_date: replacement for period type to define exact time of requested data
prefer_local: definition if data should rather be taken from a local source
write_file: should data be written to a local file
folder: place where file lists (and station data) are stored
tidy_data: reshape DataFrame to a more tidy, row based version of data
humanize_column_names: replace column names by more meaningful ones
create_new_file_index: definition if the file index should be recreated
"""
if not (period_type or start_date or end_date):
raise ValueError(
"Define either a 'time_resolution' or one of or both 'start_date' and "
"'end_date' and leave 'time_resolution' empty!"
)
try:
self.station_ids = [
int(station_id) for station_id in cast_to_list(station_ids)
]
except ValueError:
raise ValueError("List of station id's can not be parsed to integers.")
self.parameter = []
for p in cast_to_list(parameter):
self.parameter.append(parse_enumeration_from_template(p, Parameter))
self.time_resolution = parse_enumeration_from_template(
time_resolution, TimeResolution
)
# start date and end date required for collect_data in any case
self.start_date = None
self.end_date = None
if period_type:
# For the case that a period_type is given, parse the period type(s)
self.period_type = []
for pt in cast_to_list(period_type):
if pt is None:
self.period_type.append(None)
else:
self.period_type.append(
parse_enumeration_from_template(pt, PeriodType)
)
# Additional sorting required for self.period_type to ensure that for
# multiple periods the data is first sourced from historical
self.period_type = sorted(self.period_type)
else:
# working with a date range means the data may lie across several
# periods, thus include all periods
self.period_type = [
PeriodType.HISTORICAL,
PeriodType.RECENT,
PeriodType.NOW,
]
# If only one date given, make the other one equal
if not start_date:
start_date = end_date
if not end_date:
end_date = start_date
self.start_date = Timestamp(dateparser.parse(start_date))
self.end_date = Timestamp(dateparser.parse(end_date))
if not self.start_date <= self.end_date:
raise StartDateEndDateError(
"Error: 'start_date' must be smaller or equal to 'end_date'."
)
self.prefer_local = prefer_local
self.write_file = write_file
self.folder = folder
# If more than one parameter requested, automatically tidy data
self.tidy_data = len(self.parameter) == 2 or tidy_data
self.humanize_column_names = humanize_column_names
self.create_new_file_index = create_new_file_index
def __eq__(self, other):
return [
self.station_ids,
self.parameter,
self.time_resolution,
self.period_type,
self.start_date,
self.end_date,
] == other
def __str__(self):
station_ids_joined = "& ".join(
[str(station_id) for station_id in self.station_ids]
)
return ", ".join(
[
f"station_ids {station_ids_joined}",
"& ".join([parameter.value for parameter in self.parameter]),
self.time_resolution.value,
"& ".join([period_type.value for period_type in self.period_type]),
self.start_date.value,
self.end_date.value,
]
)
def collect_data(self) -> Generator[pd.DataFrame, None, None]:
"""
Method to collect data for a defined request. The function is built as a
generator in order to not clog the memory; thus, if the user wants the data as
one pandas DataFrame, the generator has to be cast to a DataFrame manually via
pd.concat(list(request.collect_data())).
Args:
None, the parameters given at initialization are used
Returns:
via a generator per station a pandas.DataFrame
"""
if self.create_new_file_index:
reset_file_index_cache()
for station_id in self.station_ids:
df_station = pd.DataFrame()
for parameter in self.parameter:
df_parameter_period = pd.DataFrame()
for period_type in self.period_type:
try:
df_period = collect_climate_observations_data(
station_ids=[station_id],
parameter=parameter,
time_resolution=self.time_resolution,
period_type=period_type,
folder=self.folder,
prefer_local=self.prefer_local,
write_file=self.write_file,
tidy_data=self.tidy_data,
humanize_column_names=self.humanize_column_names,
create_new_file_index=False,
)
except InvalidParameterCombination:
log.info(
f"Combination for "
f"{parameter.value}/"
f"{self.time_resolution.value}/"
f"{period_type} does not exist and is skipped."
)
continue
# Filter out values which already are in the DataFrame
try:
df_period = df_period[
~df_period[DWDMetaColumns.DATE.value].isin(
df_parameter_period[DWDMetaColumns.DATE.value]
)
]
except KeyError:
pass
df_parameter_period = df_parameter_period.append(
df_period, ignore_index=True
)
df_station = df_station.append(df_parameter_period, ignore_index=True)
# Filter for dates range if start_date and end_date are defined
if self.start_date:
df_station = df_station[
(df_station[DWDMetaColumns.DATE.value] >= self.start_date)
& (df_station[DWDMetaColumns.DATE.value] <= self.end_date)
]
# Empty dataframe should be skipped
if df_station.empty:
continue
yield df_station
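# Usage sketch (hypothetical station id and parameter/resolution strings, for illustration only):
#   request = DWDStationRequest(station_ids=[1048],
#                               parameter="kl",
#                               time_resolution="daily",
#                               start_date="2019-01-01",
#                               end_date="2019-12-31")
#   df = pd.concat(list(request.collect_data()))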
class DWDRadolanRequest:
"""
API for DWD RADOLAN data requests
"""
def __init__(
self,
time_resolution: Union[str, TimeResolution],
date_times: Optional[Union[str, List[Union[str, datetime]]]] = None,
start_date: Optional[Union[str, datetime]] = None,
end_date: Optional[Union[str, datetime]] = None,
prefer_local: bool = False,
write_file: bool = False,
folder: Union[str, Path] = DWD_FOLDER_MAIN,
):
time_resolution = parse_enumeration_from_template(
time_resolution, TimeResolution
)
if time_resolution not in (TimeResolution.HOURLY, TimeResolution.DAILY):
raise ValueError("RADOLAN only supports hourly and daily resolution.")
self.time_resolution = time_resolution
if date_times == "latest":
file_index_radolan = create_file_index_for_radolan(time_resolution)
self.date_times = pd.Series(
file_index_radolan[DWDMetaColumns.DATETIME.value][-1:]
)
elif date_times:
self.date_times = pd.Series(
|
pd.to_datetime(date_times, infer_datetime_format=True)
|
pandas.to_datetime
|
import fsspec
import intake # must be v 0.6.2
import xarray as xr
import pandas as pd
import os as os
import numpy as np
# An example of how to get the weighted area
# https://nordicesmhub.github.io/NEGI-Abisko-2019/training/Example_model_global_arctic_average.html
## The URL path that points to the pangeo archive table of contents.
url = "https://storage.googleapis.com/cmip6/pangeo-cmip6.json"
catalog = intake.open_esm_datastore(url)
# Set the parameters of our search; we need the tos and the ocean cell area.
expts = ['historical']
cmip_vars = ["tos"]
query = dict(
experiment_id=expts,
variable_id=cmip_vars,
grid_label="gn",
table_id="Omon"
)
catalog = catalog.search(require_all_on=["source_id"], **query)
catalog = catalog.df.copy().reset_index(drop=True)
catalog = catalog.loc[catalog['member_id'].str.contains('p1')].copy().reset_index(drop=True)
def get_ds_meta(ds):
""" Get the meta data information from the xarray data set.
:param ds: xarray dataset of CMIP data.
:return: pandas DataFrame of CMIP information.
"""
v = ds.variable_id
data = [{'variable': v,
'experiment': ds.experiment_id,
'units': ds[v].attrs['units'],
'ensemble': ds.attrs["variant_label"],
'model': ds.source_id}]
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
"""
This class interpolates dataframe positions based on Datetime.
It provides the user with the flexibility to use linear or cubic interpolation.
In general, the user passes the dataframe, the time jump and the interpolation type;
based on the type, the proper function is mapped. If the time difference between two
consecutive points exceeds the time jump, an interpolated point is inserted after the
earlier point with a time increase of the time jump. This interpolated row is added to the dataframe.
| Authors: <NAME>, <NAME>
"""
import itertools
import multiprocessing as mlp
import os
from math import ceil
from typing import Optional, Text, Union
import pandas
import pandas as pd
from ptrail.core.TrajectoryDF import PTRAILDataFrame as NumTrajDF
from ptrail.preprocessing.helpers import Helpers as helper
from ptrail.utilities import constants as const
num = os.cpu_count()
NUM_CPU = ceil((num * 2) / 3)
class Interpolation:
@staticmethod
def interpolate_position(dataframe: NumTrajDF, time_jump: float, ip_type: Optional[Text] = 'linear'):
"""
Interpolate the position of an object and create new points using one of
the interpolation methods provided by the Library. Currently, the library
supports the following 4 interpolation methods:
1. Linear Interpolation
2. Cubic-Spline Interpolation
3. Kinematic Interpolation
4. Random Walk Interpolation
Warning
-------
The Interpolation methods will only return the 4 mandatory library columns
because it is not possible to interpolate other data that may or may not be
present in the dataset apart from latitude, longitude and datetime. As a
result, other columns are dropped.
Note
----
The time-jump parameter specifies where the new points are to be
inserted based on the time difference between 2 consecutive points.
However, it does not guarantee that the dataset will be brought down
to having difference between 2 consecutive points equal to or
less than the user specified time jump.
Note
----
The time-jump is specified in seconds. Hence, if the user-specified
time-jump is not sensible, then the execution of the method will take
a very long time.
Parameters
----------
dataframe: PTRAILDataFrame
The dataframe containing the original dataset.
time_jump: float
The maximum time difference between 2 consecutive points.
ip_type: Optional[Text], default = linear
The type of interpolation that is to be used.
Returns
-------
PTRAILDataFrame:
The dataframe containing the interpolated trajectory points.
"""
# First, let's split the dataframe into smaller chunks, each containing
# the points of only a subset of the trajectories.
df = dataframe.reset_index()
df_chunks = helper._df_split_helper(df)
# Create a pool of processes which has number of processes
# equal to the number of unique dataframe partitions.
processes = [None] * len(df_chunks)
manager = mlp.Manager()
return_list = manager.list()
ip_type = ip_type.lower().strip()
if ip_type == 'linear':
for i in range(len(processes)):
processes[i] = mlp.Process(target=Interpolation._linear_ip,
args=(df_chunks[i], time_jump, return_list))
processes[i].start()
for j in range(len(processes)):
processes[j].join()
elif ip_type == 'cubic':
for i in range(len(processes)):
processes[i] = mlp.Process(target=Interpolation._cubic_ip,
args=(df_chunks[i], time_jump, return_list))
processes[i].start()
for j in range(len(processes)):
processes[j].join()
elif ip_type == 'kinematic':
for i in range(len(processes)):
processes[i] = mlp.Process(target=Interpolation._kinematic_ip,
args=(df_chunks[i], time_jump, return_list))
processes[i].start()
for j in range(len(processes)):
processes[j].join()
elif ip_type == 'random-walk':
for i in range(len(processes)):
processes[i] = mlp.Process(target=Interpolation._random_walk_ip,
args=(df_chunks[i], time_jump, return_list))
processes[i].start()
for j in range(len(processes)):
processes[j].join()
else:
raise ValueError(f"Interpolation type: {ip_type} specified does not exist. Please check the"
"interpolation type specified and type again.")
return NumTrajDF(pd.concat(return_list).reset_index(),
const.LAT, const.LONG, const.DateTime, const.TRAJECTORY_ID)
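# Usage sketch (hypothetical dataframe, for illustration only):
#   new_df = Interpolation.interpolate_position(traj_df, time_jump=3600, ip_type='cubic')
# The result keeps only the trajectory id, datetime, latitude and longitude columns,
# with extra rows inserted wherever two consecutive points of a trajectory are more
# than `time_jump` seconds apart.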
@staticmethod
def _linear_ip(dataframe: Union[pd.DataFrame, NumTrajDF], time_jump: float, return_list: list):
"""
Interpolate the position of points using the Linear Interpolation method. It makes
the use of numpy's interpolation technique for the interpolation of the points.
WARNING: Do not use this method directly as it will run slower. Instead,
use the method interpolate_position() and specify the ip_type as
linear to perform linear interpolation much faster.
Parameters
----------
dataframe: PTRAILDataFrame
The dataframe containing the original data.
time_jump: float
The maximum time difference between 2 points. If the time difference between
2 consecutive points is greater than the time jump, then another point will
be inserted between the given 2 points.
return_list: list
The list used by the Multiprocessing manager to get the return values
Returns
-------
pandas.core.dataframe.DataFrame:
The dataframe enhanced with interpolated points.
"""
# First, reset the index, extract the Latitude, Longitude, DateTime and Trajectory ID columns
# and set the DateTime column only as the index. Then, store all the unique Trajectory IDs in
# a list.
dataframe = dataframe.reset_index()[
[const.DateTime, const.TRAJECTORY_ID, const.LAT, const.LONG]].set_index(const.DateTime)
# Split the smaller dataframe further into smaller chunks containing only 1
# Trajectory ID per index.
ids_ = list(dataframe[const.TRAJECTORY_ID].value_counts().keys())
df_chunks = [dataframe.loc[dataframe[const.TRAJECTORY_ID] == ids_[i]] for i in range(len(ids_))]
# Here, create as many processes at once as there are number of CPUs available in
# the system - 1. One CPU is kept free at all times in order to not block up
# the system. (Note: The blocking of system is mostly prevalent in Windows and does
# not happen very often in Linux. However, out of caution 1 CPU is kept free regardless
# of the system.)
small_pool = mlp.Pool(NUM_CPU)
final = small_pool.starmap(helper.linear_help,
zip(df_chunks, ids_, itertools.repeat(time_jump)))
small_pool.close()
small_pool.join()
# Append the smaller dataframe to process manager list so that result
# can be finally merged into a larger dataframe.
return_list.append(pd.concat(final))
@staticmethod
def _cubic_ip(dataframe: Union[pd.DataFrame, NumTrajDF], time_jump: float, return_list: list):
try:
"""
Method for cubic interpolation of a dataframe based on the time jump provided.
It makes use of scipy library's CubicSpline functionality and interpolates
the coordinates based on the Datetime of the dataframe.
WARNING: Do not use this method directly as it will run slower. Instead,
use the method interpolate_position() and specify the ip_type as
cubic to perform cubic interpolation much faster.
Parameters
----------
dataframe: Union[pd.DataFrame, NumTrajDF]
The dataframe on which interpolation is to be performed
time_jump: float
The maximum time difference allowed to have between rows
return_list: list
The list used by the Multiprocessing manager to get the return values
Returns
-------
pandas.core.dataframe.DataFrame:
The dataframe containing the new interpolated points.
"""
# First, reset the index, extract the Latitude, Longitude, DateTime and Trajectory ID columns
# and set the DateTime column only as the index. Then, store all the unique Trajectory IDs in
# a list.
dataframe = dataframe.reset_index()[
[const.DateTime, const.TRAJECTORY_ID, const.LAT, const.LONG]].set_index(const.DateTime)
# Split the smaller dataframe further into smaller chunks containing only 1
# Trajectory ID per index.
ids_ = list(dataframe[const.TRAJECTORY_ID].value_counts().keys())
df_chunks = [dataframe.loc[dataframe[const.TRAJECTORY_ID] == ids_[i]] for i in range(len(ids_))]
# Here, create as many processes at once as there are number of CPUs available in
# the system - 1. One CPU is kept free at all times in order to not block up
# the system. (Note: The blocking of system is mostly prevalent in Windows and does
# not happen very often in Linux. However, out of caution 1 CPU is kept free regardless
# of the system.).
small_pool = mlp.Pool(NUM_CPU)
final = small_pool.starmap(helper.cubic_help,
zip(df_chunks, ids_, itertools.repeat(time_jump)))
small_pool.close()
small_pool.join()
# Append the smaller dataframe to process manager list so that result
# can be finally merged into a larger dataframe.
return_list.append(pd.concat(final))
except ValueError:
raise ValueError
@staticmethod
def _kinematic_ip(dataframe: Union[pd.DataFrame, NumTrajDF], time_jump, return_list):
"""
Method for Kinematic interpolation of a dataframe based on the time jump provided.
It interpolates the coordinates based on the Datetime of the dataframe.
WARNING: Do not use this method directly as it will run slower. Instead,
use the method interpolate_position() and specify the ip_type as
kinematic to perform kinematic interpolation much faster.
Parameters
----------
dataframe: Union[pd.DataFrame, NumTrajDF]
The dataframe on which interpolation is to be performed
time_jump: float
The maximum time difference allowed to have between rows
return_list: list
The list used by the Multiprocessing manager to get the return values
Returns
-------
pandas.core.dataframe.DataFrame:
The dataframe containing the new interpolated points.
"""
# First, reset the index, extract the Latitude, Longitude, DateTime and Trajectory ID columns
# and set the DateTime column only as the index. Then, store all the unique Trajectory IDs in
# a list.
dataframe = dataframe.reset_index()[
[const.DateTime, const.TRAJECTORY_ID, const.LAT, const.LONG]].set_index(const.DateTime)
# Split the smaller dataframe further into smaller chunks containing only 1
# Trajectory ID per index.
ids_ = list(dataframe[const.TRAJECTORY_ID].value_counts().keys())
df_chunks = [dataframe.loc[dataframe[const.TRAJECTORY_ID] == ids_[i]] for i in range(len(ids_))]
# Here, create as many processes at once as there are number of CPUs available in
# the system - 1. One CPU is kept free at all times in order to not block up
# the system. (Note: The blocking of system is mostly prevalent in Windows and does
# not happen very often in Linux. However, out of caution 1 CPU is kept free regardless
# of the system.).
small_pool = mlp.Pool(NUM_CPU)
final = small_pool.starmap(helper.kinematic_help,
zip(df_chunks, ids_, itertools.repeat(time_jump)))
small_pool.close()
small_pool.join()
# Append the smaller dataframe to process manager list so that result
# can be finally merged into a larger dataframe.
return_list.append(pd.concat(final))
@staticmethod
def _random_walk_ip(dataframe: Union[pd.DataFrame, NumTrajDF], time_jump, return_list):
"""
Method for Random walk interpolation of a dataframe based on the time jump provided.
It interpolates the coordinates based on the Datetime of the dataframe.
WARNING: Do not use this method directly as it will run slower. Instead,
use the method interpolate_position() and specify the ip_type as
random-walk to perform random walk interpolation much faster.
Parameters
----------
dataframe: Union[pd.DataFrame, NumTrajDF]
The dataframe on which interpolation is to be performed
time_jump: float
The maximum time difference allowed to have between rows
return_list: list
The list used by the Multiprocessing manager to get the return values
Returns
-------
pandas.core.dataframe.DataFrame:
The dataframe containing the new interpolated points.
"""
# First, reset the index, extract the Latitude, Longitude, DateTime and Trajectory ID columns
# and set the DateTime column only as the index. Then, store all the unique Trajectory IDs in
# a list.
dataframe = dataframe.reset_index()[
[const.DateTime, const.TRAJECTORY_ID, const.LAT, const.LONG]].set_index(const.DateTime)
# Split the smaller dataframe further into smaller chunks containing only 1
# Trajectory ID per index.
ids_ = list(dataframe[const.TRAJECTORY_ID].value_counts().keys())
df_chunks = [dataframe.loc[dataframe[const.TRAJECTORY_ID] == ids_[i]] for i in range(len(ids_))]
# Here, create as many processes at once as there are number of CPUs available in
# the system - 1. One CPU is kept free at all times in order to not block up
# the system. (Note: The blocking of system is mostly prevalent in Windows and does
# not happen very often in Linux. However, out of caution 1 CPU is kept free regardless
# of the system.).
small_pool = mlp.Pool(NUM_CPU)
final = small_pool.starmap(helper.random_walk_help,
zip(df_chunks, ids_, itertools.repeat(time_jump)))
small_pool.close()
small_pool.join()
# Append the smaller dataframe to process manager list so that result
# can be finally merged into a larger dataframe.
return_list.append(
|
pd.concat(final)
|
pandas.concat
|
from scipy.signal import butter, lfilter, resample, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy
import pandas as pd
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def get_name(self):
return 'img-spec-{}'.format(self.size)
def drop_zeros(self, df):
return df[(df.T != 0).any()]
def apply(self, data):
data = pd.DataFrame(data.T)
data = self.drop_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmap=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.append(im)
return channels
class UnitScale:
"""
Scale across the last axis.
"""
def get_name(self):
return 'unit-scale'
def apply(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def get_name(self):
return 'unit-scale-feat'
def apply(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the last axis.
"""
def get_name(self):
return "fft"
def apply(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
Apply ICA (experimental).
"""
def __init__(self, n_components=None):
self.n_components = n_components
def get_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def apply(self, data):
# apply ICA to the data (use the configured number of components, if any)
ica = FastICA(n_components=self.n_components)
data = ica.fit_transform(data)
return data
class Resample:
"""
Resample time-series data.
"""
def __init__(self, sample_rate):
self.f = sample_rate
def get_name(self):
return "resample%d" % self.f
def apply(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def get_name(self):
return "mag"
def apply(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def get_name(self):
return 'lpf%d' % self.f
def apply(self, data):
nyq = self.f / 2.0
cutoff = min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# apply filter over each channel
for j in range(len(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel means
"""
def get_name(self):
return 'mean'
def apply(self, data):
axis = data.ndim - 1
return data.mean(axis=axis)
class Abs:
"""
Take the absolute value of the data.
"""
def get_name(self):
return 'abs'
def apply(self, data):
return np.abs(data)
class Stats:
"""
Subtract the mean, then take (min, max, standard_deviation) for each channel.
"""
def get_name(self):
return "stats"
def apply(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(len(data)):
ch_data = data[i]
ch_data = data[i] - np.mean(ch_data)
outi = out[i]
outi[0] = np.std(ch_data)
outi[1] = np.min(ch_data)
outi[2] = np.max(ch_data)
return out
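# Note: for input shaped (channels, samples) this returns a (channels, 3) array
# holding (std, min, max) of each mean-subtracted channel.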
class Interp:
"""
Interpolate zeros max --> min * 1.0
NOTE: try different methods later
"""
def get_name(self):
return "interp"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return data
class Log10:
"""
Apply Log10
"""
def get_name(self):
return "log10"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log10(data)
class Slice:
"""
Take a slice of the data on the last axis.
e.g. Slice(1, 48) works like a normal python slice, that is 1-47 will be taken
"""
def __init__(self, start, end):
self.start = start
self.end = end
def get_name(self):
return "slice%d-%d" % (self.start, self.end)
def apply(self, data):
s = [slice(None), ] * data.ndim
s[-1] = slice(self.start, self.end)
return data[tuple(s)]
class CorrelationMatrix:
"""
Calculate correlation coefficients matrix across all EEG channels.
"""
def get_name(self):
return 'corr-mat'
def apply(self, data):
return upper_right_triangle(np.corrcoef(data))
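# Note (illustrative): np.corrcoef over n channels yields an n x n matrix;
# upper_right_triangle() (defined below) keeps its upper triangle including the
# diagonal, i.e. n * (n + 1) / 2 values per window.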
# Fix everything below here
class Eigenvalues:
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def get_name(self):
return 'eigenvalues'
def apply(self, data):
w, v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
class FreqCorrelation:
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
then calculate correlation co-efficients on the FFT output, followed by calculating
eigenvalues on the correlation co-efficients matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, start, end, scale_option, with_fft=False, with_corr=True, with_eigen=True):
self.start = start
self.end = end
self.scale_option = scale_option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'freq-correlation-%d-%d-%s-%s%s' % (self.start, self.end, 'withfft' if self.with_fft else 'nofft',
self.scale_option, selection_str)
def apply(self, data):
data1 = FFT().apply(data)
data1 = Slice(self.start, self.end).apply(data1)
data1 = Magnitude().apply(data1)
data1 = Log10().apply(data1)
data2 = data1
if self.scale_option == 'usf':
data2 = UnitScaleFeat().apply(data2)
elif self.scale_option == 'us':
data2 = UnitScale().apply(data2)
data2 = CorrelationMatrix().apply(data2)
if self.with_eigen:
w = Eigenvalues().apply(data2)
out = []
if self.with_corr:
data2 = upper_right_triangle(data2)
out.append(data2)
if self.with_eigen:
out.append(w)
if self.with_fft:
data1 = data1.ravel()
out.append(data1)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class TimeCorrelation:
"""
Correlation in the time domain. First downsample the data, then calculate correlation co-efficients
followed by calculating eigenvalues on the correlation co-efficients matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, max_hz, scale_option, with_corr=True, with_eigen=True):
self.max_hz = max_hz
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'time-correlation-r%d-%s%s' % (self.max_hz, self.scale_option, selection_str)
def apply(self, data):
# so that correlation matrix calculation doesn't crash
for ch in data:
if np.alltrue(ch == 0.0):
ch[-1] += 0.00001
data1 = data
if data1.shape[1] > self.max_hz:
data1 = Resample(self.max_hz).apply(data1)
if self.scale_option == 'usf':
data1 = UnitScaleFeat().apply(data1)
elif self.scale_option == 'us':
data1 = UnitScale().apply(data1)
data1 = CorrelationMatrix().apply(data1)
if self.with_eigen:
w = Eigenvalues().apply(data1)
out = []
if self.with_corr:
data1 = upper_right_triangle(data1)
out.append(data1)
if self.with_eigen:
out.append(w)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class TimeFreqCorrelation:
"""
Combines time and frequency correlation, taking both correlation coefficients and eigenvalues.
"""
def __init__(self, start, end, max_hz, scale_option):
self.start = start
self.end = end
self.max_hz = max_hz
self.scale_option = scale_option
assert scale_option in ('us', 'usf', 'none')
def get_name(self):
return 'time-freq-correlation-%d-%d-r%d-%s' % (self.start, self.end, self.max_hz, self.scale_option)
def apply(self, data):
data1 = TimeCorrelation(self.max_hz, self.scale_option).apply(data)
data2 = FreqCorrelation(self.start, self.end,
self.scale_option).apply(data)
assert data1.ndim == data2.ndim
return np.concatenate((data1, data2), axis=data1.ndim - 1)
class FFTWithTimeFreqCorrelation:
"""
Combines FFT with time and frequency correlation, taking both correlation coefficients and eigenvalues.
"""
def __init__(self, start, end, max_hz, scale_option):
self.start = start
self.end = end
self.max_hz = max_hz
self.scale_option = scale_option
def get_name(self):
return 'fft-with-time-freq-corr-%d-%d-r%d-%s' % (self.start, self.end, self.max_hz, self.scale_option)
def apply(self, data):
data1 = TimeCorrelation(self.max_hz, self.scale_option).apply(data)
data2 = FreqCorrelation(self.start, self.end,
self.scale_option, with_fft=True).apply(data)
assert data1.ndim == data2.ndim
return np.concatenate((data1, data2), axis=data1.ndim - 1)
def upper_right_triangle(matrix):
indices = np.triu_indices_from(matrix)
return
|
np.asarray(matrix[indices])
|
numpy.asarray
|
import unittest
import pandas as pd
import pinkfish as pf
class TestStatistics(unittest.TestCase):
def setUp(self):
d = {"open": [1.0, 2.0, 3.0],
"high": [1.2, 2.2, 3.2],
"low": [0.9, 1.9, 2.9],
"close": [1.1, 2.1, 3.1],
"adj_close": [1.15, 2.15, 3.15],
"cumul_total": [5, 10, 15]}
self.df_test =
|
pd.DataFrame(d)
|
pandas.DataFrame
|
#!/usr/bin/env python3
import argparse
import pandas as pd
from model import NeuralNet, BaselineModel
import torch
import config
parser = argparse.ArgumentParser()
parser.add_argument('--input_csv', default='submission/input.csv')
args = parser.parse_args()
# Config
output_file_path = 'test/predictions.csv'
# Load input.csv
with open(args.input_csv) as input_csv:
df = pd.read_csv(input_csv)
def neural_predict():
# Run predictions
y_predictions = NeuralNet(model_file_path='src/model.pickle').predict(df)
# Save predictions to file
y_predictions = y_predictions.detach().numpy()
df_predictions = pd.DataFrame(y_predictions)
df_predictions.columns = ['prediction']
df_predictions.to_csv(output_file_path, index=False)
print(f'{len(y_predictions)} predictions saved to a csv file')
def encoded_predict():
# old version:
# Run predictions
y_predictions = BaselineModel(model_file_path='src/model.pickle').predict(df)
# Save predictions to file
df_predictions =
|
pd.DataFrame({'prediction': y_predictions})
|
pandas.DataFrame
|
#!/usr/bin/python
import os
import matplotlib
from functools import reduce
matplotlib.use('Agg')
import pylab as pl
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from collections import defaultdict
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def review_to_wordlist(review, remove_stopwords=False):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
review_text = BeautifulSoup(review).get_text()
#
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]"," ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
#
# 5. Return a list of words
return(words)
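# Illustrative example (made-up review text):
#   review_to_wordlist("<b>Great pizza!!</b> Would order again.")
#   -> ['great', 'pizza', 'would', 'order', 'again']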
def clean_review_function(review):
global master_word_dict, number_of_rows
list_of_words = review_to_wordlist(review, remove_stopwords=False)
return ' '.join(list_of_words)
def load_data():
train_df = pd.read_json('train.json')
subm_df = pd.read_csv('sampleSubmission.csv')
test_df = pd.read_json('test.json')
YCOL = 'requester_received_pizza'
XCOLS_KEEP = ['requester_account_age_in_days_at_request', 'requester_days_since_first_post_on_raop_at_request', 'requester_number_of_comments_at_request', 'requester_number_of_comments_in_raop_at_request', 'requester_number_of_posts_at_request', 'requester_number_of_posts_on_raop_at_request', 'requester_number_of_subreddits_at_request', 'requester_upvotes_minus_downvotes_at_request', 'requester_upvotes_plus_downvotes_at_request', 'unix_timestamp_of_request', 'unix_timestamp_of_request_utc']
XCOLS_TOSS = ['number_of_downvotes_of_request_at_retrieval', 'number_of_upvotes_of_request_at_retrieval', 'post_was_edited', 'request_number_of_comments_at_retrieval', 'request_text', 'requester_account_age_in_days_at_retrieval', 'requester_days_since_first_post_on_raop_at_retrieval', 'requester_number_of_comments_at_retrieval', 'requester_number_of_comments_in_raop_at_retrieval', 'requester_number_of_posts_at_retrieval', 'requester_number_of_posts_on_raop_at_retrieval', 'requester_upvotes_minus_downvotes_at_retrieval', 'requester_upvotes_plus_downvotes_at_retrieval', 'requester_user_flair', 'request_id', 'giver_username_if_known']
train_df = train_df.drop(labels=XCOLS_TOSS, axis=1)
use_text_data = True
if use_text_data:
clean_train_review = train_df['request_text_edit_aware'].apply(clean_review_function)
clean_test_review = test_df['request_text_edit_aware'].apply(clean_review_function)
clean_train_title = train_df['request_title'].apply(clean_review_function)
clean_test_title = test_df['request_title'].apply(clean_review_function)
#for df in train_df, test_df:
#for c in 'request_text_edit_aware', 'request_title':
#print c, df[c].shape
nfeatures=1000
print('nfeatures', nfeatures)
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=nfeatures)
train_review_features = vectorizer.fit_transform(clean_train_review).toarray()
test_review_features = vectorizer.transform(clean_test_review).toarray()
train_title_features = vectorizer.transform(clean_train_title).toarray()
test_title_features = vectorizer.transform(clean_test_title).toarray()
print('shape0', train_review_features.shape, test_review_features.shape, train_title_features.shape, test_title_features.shape)
train_df = train_df.drop(labels=['request_text_edit_aware', 'request_title'], axis=1)
test_df = test_df.drop(labels=['request_text_edit_aware', 'request_title'], axis=1)
for df in train_df, test_df:
#df['request_text_edit_aware'] = df['request_text_edit_aware'].map(review_to_wordlist).map(len)
#df['request_title'] = df['request_title'].map(review_to_wordlist).map(len)
df['requester_subreddits_at_request'] = df['requester_subreddits_at_request'].map(len)
df['requester_username'] = df['requester_username'].map(len)
df['requester_account_age_in_days_at_request'] = df['requester_account_age_in_days_at_request'].astype(np.int64)
df['requester_days_since_first_post_on_raop_at_request'] = df['requester_days_since_first_post_on_raop_at_request'].astype(np.int64)
ytrain = train_df['requester_received_pizza'].astype(np.int64).values
train_df = train_df.drop(labels=['requester_received_pizza'], axis=1)
for c in train_df.columns:
if train_df[c].dtype == np.int64:
train_df[c] = train_df[c].astype(np.float64)
test_df[c] = test_df[c].astype(np.float64)
if use_text_data:
print('shape1', train_df.values[:,2:].shape, train_review_features.shape, train_title_features.shape)
xtrain = np.hstack([train_df.values[:,2:], train_review_features, train_title_features])
xtest = np.hstack([test_df.values[:,2:], test_review_features, test_title_features])
ytest = test_df.values[:,1]
else:
xtrain = train_df.values[:,2:]
xtest = test_df.values[:,2:]
ytest = test_df.values[:,1]
print('shape2', xtrain.shape, ytrain.shape, xtest.shape, ytest.shape)
return xtrain, ytrain, xtest, ytest
def score_model(model, xtrain, ytrain):
randint = reduce(lambda x, y: x | y, [x << (n * 8) for (n, x) in enumerate(os.urandom(4))])  # os.urandom yields ints in Python 3
xTrain, xTest, yTrain, yTest = cross_validation.train_test_split(xtrain,
ytrain,
test_size=0.4, random_state=randint)
model.fit(xTrain, yTrain)
ytpred = model.predict(xTest)
print('roc', roc_auc_score(yTest, ytpred))
print('score', model.score(xTest, yTest))
return roc_auc_score(yTest, ytpred)
def compare_models(xtraindata, ytraindata):
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
classifier_dict = {
#'linSVC': LinearSVC(),
#'kNC5': KNeighborsClassifier(),
#'kNC6': KNeighborsClassifier(6),
#'SVC': SVC(kernel="linear", C=0.025),
#'DT': DecisionTreeClassifier(max_depth=5),
#'RF200': RandomForestClassifier(n_estimators=200, n_jobs=-1),
'RF400gini': RandomForestClassifier(n_estimators=400, n_jobs=-1),
'RF400entropy': RandomForestClassifier(n_estimators=400, n_jobs=-1, criterion='entropy'),
#'RF800': RandomForestClassifier(n_estimators=800, n_jobs=-1),
#'RF1000': RandomForestClassifier(n_estimators=1000, n_jobs=-1),
'Ada': AdaBoostClassifier(),
#'SVClin': SVC(kernel='linear'),
#'SVCpoly': SVC(kernel='poly'),
#'SVCsigmoid': SVC(kernel='sigmoid'),
'Gauss': GaussianNB(),
'LDA': LDA(),
#'QDA': QDA(),
'SVC': SVC(),
}
results = {}
ytrain_vals = []
ytest_vals = []
randint = reduce(lambda x, y: x | y, [x << (n * 8) for (n, x) in enumerate(os.urandom(4))])  # os.urandom yields ints in Python 3
xTrain, xTest, yTrain, yTest = cross_validation.train_test_split(xtraindata,
ytraindata,
test_size=0.4, random_state=randint)
scale = StandardScaler()
xTrain = scale.fit_transform(xTrain)
xTest = scale.transform(xTest)
for name, model in sorted(classifier_dict.items()):
model.fit(xTrain, yTrain)
ytrpred = model.predict(xTrain)
ytpred = model.predict(xTest)
results[name] = roc_auc_score(yTest, ytpred)
ytrain_vals.append(ytrpred)
ytest_vals.append(ytpred)
print(name, results[name], ytest_vals[-1])
print('\n\n\n')
print('shape3', xTrain.shape, xTest.shape, ytrain_vals[0].shape, ytest_vals[0].shape)
xTrain = np.hstack([xTrain]+[y.reshape(xTrain.shape[0],1) for y in ytrain_vals])
xTest = np.hstack([xTest]+[y.reshape(xTest.shape[0],1) for y in ytest_vals])
print('\n\n\n')
model = RandomForestClassifier(n_estimators=400, n_jobs=-1)
model.fit(xTrain, yTrain)
ytpred = model.predict(xTest)
print('RF400', roc_auc_score(yTest, ytpred))
def prepare_submission(model, xtrain, ytrain, xtest, ytest):
model.fit(xtrain, ytrain)
ytest2 = model.predict(xtest)
request_id = ytest
df =
|
pd.DataFrame({'request_id': request_id, 'requester_received_pizza': ytest2}, columns=('request_id','requester_received_pizza'))
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import datetime
import time
from sqlalchemy.sql import func
import pandas as pd
import math
data=
|
pd.read_csv('上证50_daily.csv',index_col=0)
|
pandas.read_csv
|
import numpy as np
from scipy import stats
import json
import pandas as pd
# from ard_lib.two_dim.ard_2d_interface import InterfaceUnit
def is_broken(vector_to_test, sep_value):
error_ratio = 0.05
error_ratio_sup = sep_value * (1 + error_ratio)
error_ratio_inf = sep_value * (1 - error_ratio)
it_is = False
for i in range(len(vector_to_test) - 1):
diff_val = abs(vector_to_test[i] - vector_to_test[i + 1])
if diff_val <= error_ratio_sup:
if diff_val >= error_ratio_inf:
it_is = False
else:
it_is = True
break
else:
# print('less than')
it_is = True
break
return it_is
def get_dist_left(all_x_points_arg, init_x_index_arg, sep_value):
error_ratio = 0.05
error_ratio_sup = sep_value * (1 + error_ratio)
error_ratio_inf = sep_value * (1 - error_ratio)
# Left
l_lim = 0
index_val = init_x_index_arg
while index_val > l_lim:
# print(index)
diff_bound_val = abs(all_x_points_arg[index_val, 0] - all_x_points_arg[index_val - 1, 0])
# print(diff_bound)
if diff_bound_val >= error_ratio_sup or diff_bound_val <= error_ratio_inf:
break
index_val = index_val - 1
f_index_l_val = index_val
dist_l_val = init_x_index_arg - f_index_l_val
return dist_l_val
def get_dist_right(all_x_points_arg, init_x_index_arg, sep_value):
error_ratio = 0.05
error_ratio_sup = sep_value * (1 + error_ratio)
error_ratio_inf = sep_value * (1 - error_ratio)
# Right
r_lim = len(all_x_points_arg) - 1
index_val = init_x_index_arg
while index_val < r_lim:
# print(index)
diff_bound = abs(all_x_points_arg[index_val, 0] - all_x_points_arg[index_val + 1, 0])
# print(diff_bound)
if diff_bound >= error_ratio_sup or diff_bound <= error_ratio_inf:
break
index_val = index_val + 1
f_index_r_val = index_val + 1
dist_r_val = f_index_r_val - init_x_index_arg
return dist_r_val
def get_dist_down(all_y_points_arg, init_y_index_arg, sep_value):
error_ratio = 0.05
error_ratio_sup = sep_value * (1 + error_ratio)
error_ratio_inf = sep_value * (1 - error_ratio)
# Left
d_lim = 0
index_val = init_y_index_arg
while index_val > d_lim:
# print(index_val)
diff_bound = abs(all_y_points_arg[index_val, 1] - all_y_points_arg[index_val - 1, 1])
# print(diff_bound)
if diff_bound >= error_ratio_sup or diff_bound <= error_ratio_inf:
break
index_val = index_val - 1
f_index_d_val = index_val
dist_d_val = init_y_index_arg - f_index_d_val
return dist_d_val
def get_dist_up(all_y_points_arg, init_y_index_arg, sep_value):
error_ratio = 0.05
error_ratio_sup = sep_value * (1 + error_ratio)
error_ratio_inf = sep_value * (1 - error_ratio)
# Right
u_lim = len(all_y_points_arg) - 1
index_val = init_y_index_arg
while index_val < u_lim:
# print(index_val)
diff_bound = abs(all_y_points_arg[index_val, 1] - all_y_points_arg[index_val + 1, 1])
# print(diff_bound)
if diff_bound >= error_ratio_sup or diff_bound <= error_ratio_inf:
break
index_val = index_val + 1
f_index_u_val = index_val + 1
dist_u_val = f_index_u_val - init_y_index_arg
return dist_u_val
def get_final_index_down(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value):
# Down
down_lim = 0
index = init_y_index_arg
while index >= down_lim:
# print(index)
temp_y = all_y_points_arg[index, 1]
all_x_points_arg = np.sort(data_2d_arg[data_2d_arg[:, 1] == temp_y], axis=0)
temp_x = all_y_points_arg[index, 0]
temp_x_index = np.where(all_x_points_arg[:, 0] == temp_x)[0][0]
index_lim_sup = temp_x_index + dist_r_arg
index_lim_inf = temp_x_index - dist_l_arg
if index_lim_inf < 0:
index_lim_inf = 0
if index_lim_sup > len(all_x_points_arg):
index_lim_sup = len(all_x_points_arg)
temp_range_lr = range(index_lim_inf, index_lim_sup)
just_x = all_x_points_arg[temp_range_lr, 0]
if is_broken(just_x, sep_value):
break
index = index - 1
final_index_val = index + 1
return final_index_val
def get_final_index_up(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value):
# Up
up_lim = len(all_y_points_arg) - 1
index = init_y_index_arg
while index <= up_lim:
# print(index)
temp_y = all_y_points_arg[index, 1]
all_x_points = np.sort(data_2d_arg[data_2d_arg[:, 1] == temp_y], axis=0)
temp_x = all_y_points_arg[index, 0]
temp_x_index = np.where(all_x_points[:, 0] == temp_x)[0][0]
index_lim_sup = temp_x_index + dist_r_arg
index_lim_inf = temp_x_index - dist_l_arg
if index_lim_inf < 0:
index_lim_inf = 0
if index_lim_sup > len(all_x_points):
index_lim_sup = len(all_x_points)
temp_range_lr = range(index_lim_inf, index_lim_sup)
just_x = all_x_points[temp_range_lr, 0]
if is_broken(just_x, sep_value):
break
index = index + 1
final_index_val = index - 1
return final_index_val
def get_final_xy_index_down(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value):
# Down
final_index = get_final_index_down(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value)
# ---- last step
temp_y = all_y_points_arg[final_index, 1]
all_x_points = np.sort(data_2d_arg[data_2d_arg[:, 1] == temp_y], axis=0)
# ---- plot
temp_x = all_y_points_arg[final_index, 0]
temp_x_index = np.where(all_x_points[:, 0] == temp_x)[0][0]
index_lim_sup = temp_x_index + dist_r_arg
index_lim_inf = temp_x_index - dist_l_arg
if index_lim_inf < 0:
index_lim_inf = 0
if index_lim_sup > len(all_x_points):
index_lim_sup = len(all_x_points)
temp_range_lr = range(index_lim_inf, index_lim_sup)
final_x_min = all_x_points[temp_range_lr, 0].min()
final_x_max = all_x_points[temp_range_lr, 0].max()
final_y_down = temp_y
return final_x_min, final_x_max, final_y_down
def get_final_xy_index_up(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value):
# Up
final_index = get_final_index_up(data_2d_arg, all_y_points_arg, init_y_index_arg, dist_l_arg, dist_r_arg, sep_value)
# ---- last step
temp_y = all_y_points_arg[final_index, 1]
all_x_points = np.sort(data_2d_arg[data_2d_arg[:, 1] == temp_y], axis=0)
# ---- plot
temp_x = all_y_points_arg[final_index, 0]
temp_x_index = np.where(all_x_points[:, 0] == temp_x)[0][0]
index_lim_sup = temp_x_index + dist_r_arg
index_lim_inf = temp_x_index - dist_l_arg
if index_lim_inf < 0:
index_lim_inf = 0
if index_lim_sup > len(all_x_points):
index_lim_sup = len(all_x_points)
temp_range_lr = range(index_lim_inf, index_lim_sup)
final_x_min = all_x_points[temp_range_lr, 0].min()
final_x_max = all_x_points[temp_range_lr, 0].max()
final_y_up = temp_y
return final_x_min, final_x_max, final_y_up
def get_separation_value(data_2d_global_arg):
n_sample = 100
x_data = np.unique(np.sort(data_2d_global_arg[:, 0]))
y_data = np.unique(np.sort(data_2d_global_arg[:, 1]))
diffs_x = np.zeros(shape=[n_sample])
diffs_y = np.zeros(shape=[n_sample])
for p in range(n_sample):
x_rand_num = int(np.random.rand() * (len(x_data) - 1))
y_rand_num = int(np.random.rand() * (len(y_data) - 1))
# print(str(x_rand_num) + ' ' + str(y_rand_num))
diffs_x[p] = np.abs(x_data[x_rand_num] - x_data[x_rand_num + 1])
diffs_y[p] = np.abs(y_data[y_rand_num] - y_data[y_rand_num + 1])
sep_value_val = (stats.mode(diffs_x).mode[0] + stats.mode(diffs_y).mode[0]) / 2
# print(sep_value_val)
return sep_value_val
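# Note (illustrative): sep_value is the modal spacing between adjacent unique x
# values and between adjacent unique y values, averaged; the functions in this
# module treat it as the grid resolution of the point cloud.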
def create_2d_data_from_vertex(vertex_2d_data):
shape_vertex_data = vertex_2d_data.shape
data_2d_global_val = np.zeros(shape=[shape_vertex_data[0], (shape_vertex_data[1] - 1) + 1])
data_2d_global_val[:, [0, 1]] = np.array(vertex_2d_data.loc[:, ['x', 'y']])
data_2d_global_val = np.unique(data_2d_global_val, axis=0)
return data_2d_global_val
class Rectangle:
def __init__(self, x1, x2, y1, y2):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.a = abs(x2 - x1)
self.b = abs(y2 - y1)
self.p1 = np.array([x1, y1])
self.p2 = np.array([x1, y2])
self.p3 = np.array([x2, y1])
self.p4 = np.array([x2, y2])
#
# def get_area(self):
# return abs(self.x2 - self.x1) * abs(self.y2 - self.y1)
def get_area(self):
return self.a * self.b
def get_side_ratio(self):
if self.b == 0:
return 0
else:
return self.a / self.b
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# array order: [x1, x2, y1, y2]
def save_to_json(path, array_to_save, sep_value):
json_dump = json.dumps({'data': array_to_save, 'sep_value': sep_value}, cls=NumpyEncoder)
outfile = open(path, 'w', encoding='utf-8')
json.dump(json_dump, outfile, ensure_ascii=False, indent=2)
def load_from_json(path):
file = open(path)
json_str = json.load(file)
json_format = json.loads(json_str)
return json_format
# Returns array -> x1 x2 y1 y2 is_checked? gi gj (g:groups) and Summary [group_id, n_elements, diff_y, diff_x]
def create_groups(json_data_arg, sep_value_arg):
data_shape_val = json_data_arg.shape
data_prepros_val = np.zeros(shape=[data_shape_val[0], data_shape_val[1] + 5])
# data_prepros: 0-3 (x1, x2, y1, y2), 4 (is checked?), 5 (area), 6 (ratio), 7 (g_i), 8 (g_j)
sep = sep_value_arg / 2
for i_d in range(len(json_data_arg)):
data_prepros_val[i_d][0] = json_data_arg[i_d][0] - sep
data_prepros_val[i_d][1] = json_data_arg[i_d][1] + sep
data_prepros_val[i_d][2] = json_data_arg[i_d][2] - sep
data_prepros_val[i_d][3] = json_data_arg[i_d][3] + sep
data_prepros_val[i_d][4] = 0 # (is checked?) init in False
# area (x2-x1) * (y2-y1)
diff_x = abs(data_prepros_val[i_d][1] - data_prepros_val[i_d][0])
diff_y = abs(data_prepros_val[i_d][3] - data_prepros_val[i_d][2])
area = diff_x * diff_y
# ratio (x2-x1) / (y2-y1)
ratio = diff_x / diff_y
data_prepros_val[i_d][5] = np.round(area, decimals=4) # area
data_prepros_val[i_d][6] = np.round(ratio, decimals=4) # ratio
# Init groups
data_prepros_pd =
|
pd.DataFrame(data_prepros_val)
|
pandas.DataFrame
|
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_max = pd.Series([3, 2, 3])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all()
def test_dataframe_elementwise_max():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_max = pd.DataFrame([[3, 2, 3], [3, 2, 3]])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all().all()
def test_series_elementwise_min():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_min = pd.Series([1, 2, 1])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all()
def test_dataframe_elementwise_min():
x =
|
pd.DataFrame([[1, 2, 3], [3, 2, 1]])
|
pandas.DataFrame
|
'''parameters and collections of parameters'''
import abc
import itertools
import numbers
import pandas
import six
import warnings
from .util import (NamedObject, Variable, NamedObjectMap, Counter,
NamedDict, combine)
from ..util import get_module_logger
# Created on Jul 14, 2016
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = [
'Parameter',
'RealParameter',
'IntegerParameter',
'BooleanParameter',
'CategoricalParameter',
'create_parameters',
'experiment_generator',
'Policy',
'Scenario',
'Experiment']
_logger = get_module_logger(__name__)
class Constant(NamedObject):
'''Constant class,
can be used for any parameter that has to be set to a fixed value
'''
def __init__(self, name, value):
super(Constant, self).__init__(name)
self.value = value
def __repr__(self, *args, **kwargs):
return '{}(\'{}\', {})'.format(self.__class__.__name__,
self.name, self.value)
class Category(Constant):
def __init__(self, name, value):
super(Category, self).__init__(name, value)
def create_category(cat):
if isinstance(cat, Category):
return cat
else:
return Category(str(cat), cat)
class Parameter(Variable):
''' Base class for any model input parameter
Parameters
----------
name : str
lower_bound : int or float
upper_bound : int or float
resolution : collection
pff : bool
if true, sample over this parameter using resolution in case of
partial factorial sampling
Raises
------
ValueError
if lower bound is larger than upper bound
ValueError
if entries in resolution are outside range of lower_bound and
upper_bound
'''
__metaclass__ = abc.ABCMeta
INTEGER = 'integer'
UNIFORM = 'uniform'
def __init__(self, name, lower_bound, upper_bound, resolution=None,
default=None, variable_name=None, pff=False):
super(Parameter, self).__init__(name)
if resolution is None:
resolution = []
for entry in resolution:
if not ((entry >= lower_bound) and (entry <= upper_bound)):
raise ValueError(('resolution not consistent with lower and '
'upper bound'))
if lower_bound >= upper_bound:
raise ValueError('upper bound should be larger than lower bound')
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.resolution = resolution
self.default = default
self.variable_name = variable_name
self.pff = pff
def __eq__(self, other):
comparison = [all(hasattr(self, key) == hasattr(other, key) and
getattr(self, key) == getattr(other, key) for key
in self.__dict__.keys())]
comparison.append(self.__class__ == other.__class__)
return all(comparison)
def __str__(self):
return self.name
def __repr__(self, *args, **kwargs):
start = '{}(\'{}\', {}, {}'.format(self.__class__.__name__,
self.name,
self.lower_bound, self.upper_bound)
if self.resolution:
start += ', resolution={}'.format(self.resolution)
if self.default:
start += ', default={}'.format(self.default)
if self.variable_name != [self.name]:
start += ', variable_name={}'.format(self.variable_name)
if self.pff:
start += ', pff={}'.format(self.pff)
start += ')'
return start
class RealParameter(Parameter):
''' real valued model input parameter
Parameters
----------
name : str
lower_bound : int or float
upper_bound : int or float
resolution : iterable
variable_name : str, or list of str
Raises
------
ValueError
if lower bound is larger than upper bound
ValueError
if entries in resolution are outside range of lower_bound and
upper_bound
'''
def __init__(self, name, lower_bound, upper_bound, resolution=None,
default=None, variable_name=None, pff=False):
super(
RealParameter,
self).__init__(
name,
lower_bound,
upper_bound,
resolution=resolution,
default=default,
variable_name=variable_name,
pff=pff)
self.dist = Parameter.UNIFORM
@property
def params(self):
return (self.lower_bound, self.upper_bound - self.lower_bound)
class IntegerParameter(Parameter):
''' integer valued model input parameter
Parameters
----------
name : str
lower_bound : int
upper_bound : int
resolution : iterable
variable_name : str, or list of str
Raises
------
ValueError
if lower bound is larger than upper bound
ValueError
if entries in resolution are outside range of lower_bound and
        upper_bound, or not a numbers.Integral instance
    ValueError
        if lower_bound or upper_bound is not a numbers.Integral instance
'''
def __init__(self, name, lower_bound, upper_bound, resolution=None,
default=None, variable_name=None, pff=False):
super(
IntegerParameter,
self).__init__(
name,
lower_bound,
upper_bound,
resolution=resolution,
default=default,
variable_name=variable_name,
pff=pff)
lb_int = isinstance(lower_bound, numbers.Integral)
up_int = isinstance(upper_bound, numbers.Integral)
        if not (lb_int and up_int):
            raise ValueError('lower bound and upper bound must be integers')
for entry in self.resolution:
if not isinstance(entry, numbers.Integral):
raise ValueError(('all entries in resolution should be '
'integers'))
self.dist = Parameter.INTEGER
@property
def params(self):
        # scipy.stats.randint samples the half-open interval [low, high),
        # hence the +1 so that upper_bound itself can be drawn
return (self.lower_bound, self.upper_bound + 1)
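# Added illustration (not in the original module): a minimal sketch of how
# IntegerParameter.params feeds scipy.stats.randint. randint samples the half-open
# interval [low, high), so passing (lower_bound, upper_bound + 1) keeps the
# parameter's own upper bound reachable. The name and bounds here are invented.
def _integer_params_example():
    from scipy import stats
    dial = IntegerParameter('n_pumps', 1, 5)
    low, high = dial.params                              # -> (1, 6)
    sample = stats.randint(low, high).rvs(random_state=42)
    return low, high, int(sample)                        # sample lies in {1, ..., 5}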
class CategoricalParameter(IntegerParameter):
''' categorical model input parameter
Parameters
----------
name : str
categories : collection of obj
variable_name : str, or list of str
multivalue : boolean
if categories have a set of values, for each variable_name
a different one.
'''
@property
def categories(self):
return self._categories
@categories.setter
def categories(self, values):
self._categories.extend(values)
def __init__(self, name, categories, default=None, variable_name=None,
pff=False, multivalue=False):
lower_bound = 0
upper_bound = len(categories) - 1
if upper_bound == 0:
raise ValueError('there should be more than 1 category')
super(
CategoricalParameter,
self).__init__(
name,
lower_bound,
upper_bound,
resolution=None,
default=default,
variable_name=variable_name,
pff=pff)
cats = [create_category(cat) for cat in categories]
self._categories = NamedObjectMap(Category)
self.categories = cats
self.resolution = [i for i in range(len(self.categories))]
self.multivalue = multivalue
def index_for_cat(self, category):
'''return index of category
Parameters
----------
category : object
Returns
-------
int
'''
for i, cat in enumerate(self.categories):
if cat.name == category:
return i
raise ValueError("category not found")
def cat_for_index(self, index):
'''return category given index
Parameters
----------
index : int
Returns
-------
object
'''
return self.categories[index]
def invert(self, name):
''' invert a category to an integer
Parameters
----------
name : obj
category
Raises
------
ValueError
if category is not found
'''
warnings.warn('deprecated, use index_for_cat instead')
return self.index_for_cat(name)
def __repr__(self, *args, **kwargs):
template1 = 'CategoricalParameter(\'{}\', {}, default={})'
template2 = 'CategoricalParameter(\'{}\', {})'
if self.default:
representation = template1.format(self.name, self.resolution,
self.default)
else:
representation = template2.format(self.name, self.resolution)
return representation
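# Added illustration (not part of the original module): a minimal sketch of how a
# CategoricalParameter maps between category labels and the integer resolution it
# is sampled over. The lever name 'pollution_control' and its categories are
# assumptions made up for this example.
def _categorical_example():
    lever = CategoricalParameter('pollution_control',
                                 ['none', 'moderate', 'strict'])
    idx = lever.index_for_cat('moderate')     # -> 1
    cat = lever.cat_for_index(idx)            # -> Category('moderate', 'moderate')
    return idx, cat.value, lever.resolution   # resolution is [0, 1, 2]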
class BinaryParameter(CategoricalParameter):
''' a categorical model input parameter that is only True or False
Parameters
----------
name : str
'''
def __init__(self, name, default=None, ):
super(
BinaryParameter,
self).__init__(
name,
categories=[
False,
True],
default=default)
class BooleanParameter(IntegerParameter):
''' boolean model input parameter
A BooleanParameter is similar to a CategoricalParameter, except
the category values can only be True or False.
Parameters
----------
name : str
variable_name : str, or list of str
'''
def __init__(self, name, default=None, variable_name=None,
pff=False):
super(BooleanParameter, self).__init__(
name, 0, 1, resolution=None, default=default,
variable_name=variable_name, pff=pff)
self.categories = [False, True]
self.resolution = [0, 1]
def __repr__(self, *args, **kwargs):
template1 = 'BooleanParameter(\'{}\', default={})'
template2 = 'BooleanParameter(\'{}\', )'
if self.default:
representation = template1.format(self.name,
self.default)
else:
representation = template2.format(self.name, )
return representation
class Policy(NamedDict):
'''Helper class representing a policy
Attributes
----------
name : str, int, or float
id : int
all keyword arguments are wrapped into a dict.
'''
# TODO:: separate id and name
# if name is not provided fall back on id
# id will always be a number and can be generated by
# a counter
    # the new experiment class can then take the names from
# policy and scenario to create a unique name while also
# multiplying the ID's (assuming we count from 1 onward) to get
# a unique experiment ID
id_counter = Counter(1)
def __init__(self, name=Counter(), **kwargs):
        # TODO: perhaps move this to a separate function that internally uses
# counter
if isinstance(name, int):
name = f"policy {name}"
super(Policy, self).__init__(name, **kwargs)
self.id = Policy.id_counter()
def to_list(self, parameters):
'''get list like representation of policy where the
parameters are in the order of levers'''
return [self[param.name] for param in parameters]
def __repr__(self):
return "Policy({})".format(super(Policy, self).__repr__())
class Scenario(NamedDict):
'''Helper class representing a scenario
Attributes
----------
name : str, int, or float
id : int
all keyword arguments are wrapped into a dict.
'''
# we need to start from 1 so scenario id is known
id_counter = Counter(1)
def __init__(self, name=Counter(), **kwargs):
super(Scenario, self).__init__(name, **kwargs)
self.id = Scenario.id_counter()
def __repr__(self):
return "Scenario({})".format(super(Scenario, self).__repr__())
class Case(NamedObject):
'''A convenience object that contains a specification
of the model, policy, and scenario to run
TODO:: we need a better name for this. probably this should be
named Experiment, while Experiment should be
ExperimentReplication
'''
def __init__(self, name, model_name, policy, scenario, experiment_id):
super(Case, self).__init__(name)
self.experiment_id = experiment_id
self.policy = policy
self.model_name = model_name
self.scenario = scenario
class Experiment(NamedDict):
'''helper class that combines scenario, policy, any constants, and
replication information (seed etc) into a single dictionary.
'''
def __init__(self, scenario, policy, constants, replication=None):
scenario_id = scenario.id
policy_id = policy.id
if replication is None:
replication_id = 1
else:
replication_id = replication.id
constants = combine(constants, replication)
# this is a unique identifier for an experiment
# we might also create a better looking name
self.id = scenario_id * policy_id * replication_id
name = '{}_{}_{}'.format(scenario.name, policy.name, replication_id)
super(Experiment, self).__init__(
name, **combine(scenario, policy, constants))
def experiment_generator(scenarios, model_structures, policies):
'''
generator function which yields experiments
Parameters
----------
    scenarios : iterable of dicts
model_structures : list
policies : list
Notes
-----
this generator is essentially three nested loops: for each model structure,
for each policy, for each scenario, return the experiment. This means
    that scenarios should not be a generator because this will be exhausted after
    running the first policy on the first model.
'''
jobs = itertools.product(model_structures, policies, scenarios)
for i, job in enumerate(jobs):
msi, policy, scenario = job
name = '{} {} {}'.format(msi.name, policy.name, i)
case = Case(name, msi.name, policy, scenario, i)
yield case
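# Added illustration (not in the original module): a minimal sketch of driving
# experiment_generator with plain lists. As the docstring notes, scenarios must be
# a list rather than a generator or it is exhausted after the first policy/model
# combination. The model stub and names below are assumptions for this example.
def _generator_example():
    class _ModelStub(NamedObject):
        pass
    models = [_ModelStub('lake_model')]
    policies = [Policy('no_action'), Policy('adaptive')]
    scenarios = [Scenario('base'), Scenario('dry_year')]   # a list, not a generator
    return [case.name for case in experiment_generator(scenarios, models, policies)]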
def parameters_to_csv(parameters, file_name):
'''Helper function for writing a collection of parameters to a csv file
Parameters
----------
parameters : collection of Parameter instances
file_name : str
The function iterates over the collection and turns these into a data
frame prior to storing them. The resulting csv can be loaded using the
create_parameters function. Note that currently we don't store resolution
and default attributes.
'''
params = {}
for i, param in enumerate(parameters):
if isinstance(param, CategoricalParameter):
values = param.resolution
else:
values = param.lower_bound, param.upper_bound
dict_repr = {j: value for j, value in enumerate(values)}
dict_repr['name'] = param.name
params[i] = dict_repr
params = pandas.DataFrame.from_dict(params, orient='index')
# for readability it is nice if name is the first column, so let's
# ensure this
cols = params.columns.tolist()
cols.insert(0, cols.pop(cols.index('name')))
params = params.reindex(columns=cols)
# we can now safely write the dataframe to a csv
pandas.DataFrame.to_csv(params, file_name, index=False)
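# Added illustration (not in the original module): a minimal sketch of the csv round
# trip promised in the docstring above, using parameters_to_csv together with
# create_parameters (defined below). The file name 'uncertainties.csv' is an
# assumption; as noted, resolution and default attributes are not preserved.
def _csv_roundtrip_example():
    parameters = [RealParameter('a_real', 0, 1.1),
                  IntegerParameter('an_int', 1, 9)]
    parameters_to_csv(parameters, 'uncertainties.csv')
    return create_parameters('uncertainties.csv')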
def create_parameters(uncertainties, **kwargs):
'''Helper function for creating many Parameters based on a DataFrame
or csv file
Parameters
----------
uncertainties : str, DataFrame
**kwargs : dict, arguments to pass to pandas.read_csv
Returns
-------
list of Parameter instances
    This helper function creates uncertainties. It assumes that the
    DataFrame or csv file has a column titled 'name'; optionally a type column
    {int, real, cat} can be included as well. The remainder of the columns
    are handled as values for the parameters. If type is not specified,
the function will try to infer type from the values.
Note that this function does not support the resolution and default kwargs
on parameters.
An example of a csv:
NAME,TYPE,,,
a_real,real,0,1.1,
an_int,int,1,9,
a_categorical,cat,a,b,c
this CSV file would result in
[RealParameter('a_real', 0, 1.1, resolution=[], default=None),
IntegerParameter('an_int', 1, 9, resolution=[], default=None),
CategoricalParameter('a_categorical', ['a', 'b', 'c'], default=None)]
'''
if isinstance(uncertainties, six.string_types):
uncertainties =
|
pandas.read_csv(uncertainties, **kwargs)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
        # Check that sampling re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
|
assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
"""The :py:mod:`output` module defines defines a :class:`Output` object
and derived classes that handle file output for batch jobs of wrapped
:any:`Simulation` module runs.
Class Definitions
+++++++++++++++++
"""
from pathlib import Path
import pandas as pd
import h5py
import json
import warnings
from typing import Dict, List, Any, Optional, Union
import pkg_resources
# avoid explicit dependence on cantera
try:
pkg_resources.get_distribution('cantera')
except pkg_resources.DistributionNotFound:
ct = ImportError('Method requires a working cantera installation.')
else:
import cantera as ct
class Output:
"""Class handling file output
Arguments:
settings: Dictionary specifying output settings
        file_name: filename (overrides settings entry)
        file_path: output path (overrides settings entry)
"""
_ext = [None]
def __init__(
self,
settings: Dict[str, Any],
file_name: Optional[str]=None,
file_path: Optional[str]=None
):
"""Constructor"""
file_format = settings.pop('format', None)
if isinstance(file_format, str):
file_format = '.' + file_format.lstrip('.')
if 'force_overwrite' in settings:
settings['force'] = settings.pop('force_overwrite')
warnings.warn("Key 'force_overwrite' is replaced by 'force'",
PendingDeprecationWarning)
if 'file_name' in settings:
settings['name'] = settings.pop('file_name')
warnings.warn("Key 'file_name' is replaced by 'name'",
PendingDeprecationWarning)
file_path = settings.pop('path', file_path)
if file_path is not None:
file_path = str(file_path)
# file name keyword overrides dictionary
if file_name is None:
file_name = Path(settings.pop('name'))
if file_format is None:
file_format = file_name.suffix
file_name = file_name.stem
else:
# file_name may contain path information
head = Path(file_name).parent
file_name = Path(file_name).name
if str(head) != "." and file_path is None:
file_path = str(head)
elif str(head) != ".":
raise RuntimeError('Contradictory path specifications')
tmp = Path(file_name).suffix
if tmp:
file_format = tmp
file_name = Path(file_name).stem
# ensure extension matches object type
if file_format not in self._ext:
raise ValueError("Incompatible output type for class {}: {} is "
"not in {}".format(type(self), file_format, self._ext))
self.force = settings.pop('force', False)
self.mode = settings.pop('mode', 'a')
self.path = file_path
self.name = file_name + file_format
self.kwargs = settings.copy()
@property
def output_name(self):
"""Return output name"""
if self.path is None:
return str(self.name)
else:
out = Path(self.path) / self.name
return str(out)
@property
def settings(self):
"""Output settings"""
out = {
'format' : Path(self.name).suffix.lstrip('.'),
'name': self.name,
'path': self.path,
'force': self.force,
'mode': self.mode
}
return {**out, **self.kwargs}
@classmethod
def from_dict(
cls,
settings: Dict[str, Any],
file_name: Optional[str]=None,
file_path: Optional[str]=None
) -> 'Output':
"""Factory loader for :class:`Output` objects
Arguments:
settings: Dictionary containing output settings
"""
ext = settings.get('format')
if ext is None:
return Output(settings.copy(), file_name, file_path)
ext = '.' + ext
if ext in WriteHDF._ext:
return WriteHDF(settings.copy(), file_name, file_path)
if ext in WriteCSV._ext:
return WriteCSV(settings.copy(), file_name, file_path)
raise NotImplementedError("Invalid file format {}".format(ext))
def save(
self,
data: Any,
entry: str,
variation: Optional[Dict]=None,
mode: Optional[str]='a',
errored: Optional[bool]=False
) -> bool:
"""Save output
Arguments:
data: Data to be saved
entry: Description of simulation task
variation: Parameter values
mode: Save mode
errored: Boolean describing success of simulation task
Returns:
`True` if data are saved successfully
"""
raise NotImplementedError("Needs to be overloaded by derived methods")
def dir(self) -> List[str]:
"""List previously saved cases"""
raise NotImplementedError("Needs to be overloaded by derived methods")
def load_like(self, entry: str, other: Any) -> Any:
"""Load previously saved output
Arguments:
entry: Label of entry to be loaded
other: Object of the same type as the one to be loaded
"""
raise NotImplementedError("Needs to be overloaded by derived methods")
def finalize(
self,
metadata: Dict[str, Any]
) -> bool:
"""Save metadata
Arguments:
metadata: Metadata to be appended to the output file
Returns:
`True` if metadata are saved successfully
"""
raise NotImplementedError("Needs to be overloaded by derived methods")
class WriteCSV(Output):
"""Class writing CSV output"""
_ext = ['.csv']
def save(self, data, entry, variation=None, mode=None, errored=False):
""
if not data:
return
returns = self.kwargs.get('returns')
# key, value = next(iter(data.items()))
if type(data).__name__ == 'Solution':
if isinstance(ct, ImportError):
raise ct # pylint: disable=raising-bad-type
# use cantera native route to pandas.Series via SolutionArray.to_pandas
arr = ct.SolutionArray(data, 1)
data = arr.to_pandas(cols=list(returns.values())).iloc[0]
elif type(data).__name__ == 'Mixture':
# there is no native route in cantera
out = []
for k, v in returns.items():
val = getattr(data, str(v))
if hasattr(data, k) and isinstance(getattr(data, k), list):
out.extend(zip(getattr(data, k), val))
else:
out.append((k, val))
data = pd.Series(dict(out))
if isinstance(data, pd.Series):
if isinstance(variation, dict):
var = {k.replace('.', '_'): v for k, v in variation.items()}
data = pd.concat([pd.Series(var), data])
row = pd.concat([pd.Series({'output': entry}), data])
fname = Path(self.output_name)
if fname.is_file():
df =
|
pd.read_csv(fname)
|
pandas.read_csv
|
import os
import glob
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.linear_model import LinearRegression
import random
from ipyleaflet import (Map, basemaps, WidgetControl, GeoJSON,
                        LayersControl, Icon, Marker, FullScreenControl,
                        CircleMarker, Popup, AwesomeIcon)
from ipywidgets import HTML
import matplotlib.pyplot as plt
import matplotlib as mpl
import codecs
class functions:
def __init__(self, data):
validation = self.data_validation(data)
if(validation[0]):
columns = self.has_columns(data)
if(columns[0]):
self.data = data
print('Successfully imported the data!\n')
else:
print(columns[1])
else:
print(validation[1])
def has_columns(self,data):
if set(['mean_2m_air_temperature','minimum_2m_air_temperature','maximum_2m_air_temperature','total_precipitation','Date']).issubset(data.columns):
return (True,None)
else:
return (False,"Data is missing columns")
def data_validation(self,data):
if (data['Date'].dtype=='datetime64[ns]'):
return (True,None)
else:
return (False, "Date has to be in datetime format")
def extreme_degree_days(self,data,thresholdtemp, year, months=[1,12]):
sum=self.integral_time(data,thresholdtemp,'above',year,months)
new_df=pd.DataFrame()
tempdata=data[data['Date'].dt.year==year]
for i in range(months[0],months[1]+1):
new_df=new_df.append(tempdata[tempdata['Date'].dt.month==i], ignore_index = True)
index=new_df.index
extreme_degree_days=sum/len(index)
return extreme_degree_days
def integral_time(self,data, threshold, area, year, months):
sum=0
new_df=pd.DataFrame()
tempdata=data[data['Date'].dt.year==year]
for i in range(months[0],months[1]+1):
new_df=new_df.append(tempdata[tempdata['Date'].dt.month==i], ignore_index = True)
if area=='above':
for i, j in new_df.iterrows():
sum+=max((j['mean_2m_air_temperature']-threshold),0)
if area=='below':
for i, j in new_df.iterrows():
sum+=max((threshold-j['mean_2m_air_temperature']),0)
return sum
def growing_degree_days(self,data,year,basetemp):
sum=0
k=0
new_df=data[data['Date'].dt.year==year]
for i, j in new_df.iterrows():
temp=(((j['minimum_2m_air_temperature']+j['maximum_2m_air_temperature'])/2)- basetemp)
if (temp>0):
sum+=temp
k+=1
gdd=sum/k
return gdd
def growingdays_basetemp(self,crop):
if crop in ["wheat", "barley", "rye", "oats", "flaxseed", "lettuce", "asparagus"]:
return 4.5
elif crop in ["sunflower","potato"]:
return 8
elif crop in ["maize", "sorghum","rice", "soybeans", "tomato", "coffee"]:
return 10
else:
print("The crop is not present. Look up base temperature for: [wheat,barley,rye,oats,flaxseed,lettuce,asparagus,sunflower,potato,maize,sorghum,rice,soybeans,tomato,coffee] instead")
return None
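    # Added illustration (not part of the original class): a minimal sketch of the
    # growing-degree-days workflow on an invented three-day DataFrame; real inputs
    # would be full daily exports with the same column names.
    @staticmethod
    def _gdd_example():
        demo = pd.DataFrame({
            'Date': pd.to_datetime(['2020-06-01', '2020-06-02', '2020-06-03']),
            'mean_2m_air_temperature': [18.0, 20.0, 22.0],
            'minimum_2m_air_temperature': [12.0, 14.0, 15.0],
            'maximum_2m_air_temperature': [24.0, 26.0, 29.0],
            'total_precipitation': [0.0, 1.2, 0.4],
        })
        helper = functions(demo)
        base = helper.growingdays_basetemp('maize')   # 10 degC for maize
        # averages ((Tmin + Tmax) / 2 - base) over the days where it is positive
        return helper.growing_degree_days(demo, 2020, base)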
def average_temperature(self,data,year,months=[1,12]):
new_df=pd.DataFrame()
tempdata=data[data['Date'].dt.year==year]
for i in range(months[0],months[1]+1):
new_df=new_df.append(tempdata[tempdata['Date'].dt.month==i], ignore_index = True)
avg=new_df['mean_2m_air_temperature'].mean()
return avg
def total_precipitation(self,data,year,months=[1,12]):
new_df=pd.DataFrame()
tempdata=data[data['Date'].dt.year==year]
for i in range(months[0],months[1]+1):
new_df=new_df.append(tempdata[tempdata['Date'].dt.month==i], ignore_index = True)
sum=new_df['total_precipitation'].sum()
return sum
def temptrend(self,data,years):
pvalT=[]
yearavg=[]
for year in range(years[0],years[1]+1):
new_df=data[data['Date'].dt.year==year]
avg=new_df['mean_2m_air_temperature'].mean()
yearavg.append(avg)
x = np.array(yearavg)
t = np.array([i for i in range(len(yearavg))])
reg = LinearRegression().fit(t.reshape(-1, 1), x)
p = stats.pearsonr(t, x)
pvalT=p
r=p[0]
return r,pvalT[1],reg.coef_[0]
def preciptrend(self,data,years):
pvalP=[]
yearavg=[]
for year in range(years[0],years[1]+1):
new_df=data[data['Date'].dt.year==year]
avg=new_df['total_precipitation'].mean()
yearavg.append(avg)
x = np.array(yearavg)
t = np.array([i for i in range(len(yearavg))])
reg = LinearRegression().fit(t.reshape(-1, 1), x)
p = stats.pearsonr(t, x)
pvalP=p
r=p[0]
return r,pvalP[1],reg.coef_[0]
def plotmap(self,metric,climatedf,coorddf,filepath,filename='Map'):
sel_cols = ['Location','Year',metric]
climatedf = climatedf[sel_cols]
climatedf=climatedf.reindex(columns = climatedf.columns.tolist()
+ ['color'])
color=[]
for (i,j) in climatedf.iterrows():
value=(j[metric]-climatedf[metric].min())/(climatedf[metric].max()-climatedf[metric].min())
            if(value>=0 and value<=(1/6)):
color.append('darkblue')
elif(value>(1/6) and value<=(2/6)):
color.append('blue')
elif(value>(2/6) and value<=(3/6)):
color.append('green')
elif(value>(3/6) and value<=(4/6)):
color.append('orange')
elif(value>(4/6) and value<=(5/6)):
color.append('red')
else:
color.append('darkred')
climatedf['color']=color
gps_color =
|
pd.merge(climatedf, coorddf, on=['Location'])
|
pandas.merge
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
#-*- coding:utf-8 -*-
from pyecharts import Kline, Line, Page,Overlap,Bar,Pie,Timeline
from pandas import DataFrame as df
import re
import tushare as ts
import time
import pandas as pd
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def calculateMa(data, Daycount):
sum = 0
result = list( 0 for x in data)#used to calculate ma. Might be deprecated for future versions
for i in range(0 , Daycount):
sum = sum + data[i]
result[i] = sum/(i+1)
for i in range(Daycount, len(data)):
sum = sum - data[i-Daycount]+data[i]
result[i] = sum/Daycount
return result
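# Illustrative check (added annotation, not part of the original script): with a
# hypothetical input, calculateMa([1, 2, 3, 4, 5], 2) returns [1.0, 1.5, 2.5, 3.5, 4.5];
# the first Daycount entries are running means of the data seen so far, the rest are
# plain Daycount-wide moving averages.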
def graphpage(items,startdate,enddate,option,width1, height1): #labels: fuquan (adjusted price), K-line or tick-by-tick; option: hfq, qfq or 15, 30, D, etc
page = Page()
for i in items:#generate numbers of graphs according to numbers of queries in treewidget
j = re.split("-",i)
if len(j)==3:
a = generateline(j[1],j[2],startdate,enddate,option)#stock number, Type, startdate, enddate, 30 or 15 or days
if a is None:
continue
time = [d[0] for d in a]#get time from returned dictionary
if j[2]!="Kline":
if len(a[0])==4 and a[0][2]=="bar": #for tick-by-tick data
overlap = Overlap()
form = [e[1] for e in a]
bar = Bar(j[0] + "-" + j[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / len(items))
bar.add(j[0] + "-" + j[2], time, form, yaxis_min = "dataMin",yaxis_max = "dataMax",is_datazoom_show = True, datazoom_type = "slider")
overlap.add(bar)
line = Line(j[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / len(items))
price = [e[3] for e in a]
line.add(j[0] + "price", time, price, yaxis_min = "dataMin",yaxis_max = "dataMax", is_datazoom_show = True, datazoom_type = "slider",
yaxis_type="value")
overlap.add(line,yaxis_index=1, is_add_yaxis=True)
page.add(overlap)
if len(a[0])==5 and a[0][3]=="pie":
overlap = Overlap()
timeline = Timeline(is_auto_play=False, timeline_bottom=0) #zip(namearray,valuearray,quarter,flag,num)
namearray = [c[0] for c in a]
valuearray = [d[1] for d in a]
quarter = [e[2] for e in a]
num = a[0][4]
for x in range(0, num / 10):
list1 = valuearray[x]
names = namearray[x]
quarters = quarter[x][0]
for idx, val in enumerate(list1):
list1[idx] = float(val)
pie = Pie(j[0]+"-"+"前十股东".decode("utf-8"),width=width1 * 10 / 11, height=(height1 * 10 / 11))
pie.add(j[0]+"-"+"前十股东".decode("utf-8"), names, list1, radius=[30, 55], is_legend_show=False,
is_label_show=True, label_formatter = "{b}: {c}\n{d}%")
# print list
# print names
# print quarterarray
timeline.add(pie, quarters)
# namearray = [y for y in namearray[x]]
timeline.render()
return
#need more statement
else:
form = [e[1] for e in a]#for non tick-by-tick data
line = Line(j[0] + "-" + j[2], width=width1*10/11, height=(height1*10/11)/len(items))
line.add(j[0] + "-" + j[2], time, form, is_datazoom_show=True, datazoom_type="slider",yaxis_min="dataMin",yaxis_max="dataMax")
page.add(line)
else:
overlap = Overlap()#for K-line (candlestick) charts
close = zip(*a)[2]
candle = [[x[1], x[2], x[3], x[4]] for x in a]
candlestick = Kline(j[0] + "-" + j[2], width=width1*10/11, height = (height1*10/11) / len(items))
candlestick.add(j[0], time, candle, is_datazoom_show=True, datazoom_type="slider",yaxis_interval = 1)
overlap.add(candlestick)
if len(close)>10:
ma10 = calculateMa(close, 10)
line1 = Line(title_color="#C0C0C0")
line1.add(j[0] + "-" + "MA10", time, ma10)
overlap.add(line1)
if len(close)>20:
ma20 = calculateMa(close, 20)
line2 = Line(title_color="#C0C0C0")
line2.add(j[0] + "-" + "MA20", time, ma20)
overlap.add(line2)
if len(close)>30:
ma30 = calculateMa(close, 30)
line3 = Line(title_color="#C0C0C0")
line3.add(j[0] + "-" + "MA30", time, ma30)
overlap.add(line3)
page.add(overlap)
else:
for k in range(1, len(j)/3):#if graphs are combined
j[3*k-1] = re.sub("\n&","",j[3*k-1])
sizearray=[]
#if j[1] != "Candlestick"
layout = Overlap()
for i in xrange(0, len(j),3):
array = j[i:i +3]
b = generateline(array[1],array[2],startdate,enddate,option)
if b is None:
continue
btime = [d[0] for d in b]
if array[2] != "Kline":
if len(b[0])==4 and b[0][2]=="bar":
form = [e[1] for e in b]
bar = Bar(array[0] + "-" + array[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / len(items))
bar.add(array[0] + "-" + array[2], btime, form, is_datazoom_show=True, datazoom_type="slider",
yaxis_min="dataMin", yaxis_max="dataMax")
layout.add(bar)
line = Line(array[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / len(items))
price = [e[3] for e in b]
line.add(array[0] + "price", btime, price, is_datazoom_show=True, datazoom_type="slider",
yaxis_min="dataMin", yaxis_type="value")
layout.add(line, yaxis_index=1, is_add_yaxis=True)
else:
line = Line(array[0] + "-" + array[2],width=width1*10/11, height=(height1*10/11) / len(items))
line.add(array[0]+"-"+array[2], btime, b, is_datazoom_show=True, yaxis_max = "dataMax", yaxis_min = "dataMin",datazoom_type="slider")
layout.add(line)
else:
candle = [[x[1], x[2], x[3], x[4]] for x in b]
candlestick = Kline(array[0] + "-" + array[1], width=width1*10/11,
height=(height1*10/11) / len(items))
candlestick.add(array[0], btime, candle, is_datazoom_show=True, datazoom_type=["slider"])
#if i == 0:
close = zip(*b)[2]
if len(close)>10:
ma10 = calculateMa(close, 10)
line4 = Line(title_color="#C0C0C0")
line4.add(array[0] + "-" + "MA10", btime, ma10)
layout.add(line4)
if len(close)>20:
ma20 = calculateMa(close, 20)
line5 = Line(title_color="#C0C0C0")
line5.add(array[0] + "-" + "MA20", btime, ma20)
layout.add(line5)
if len(close)>30:
ma30 = calculateMa(close, 30)
line6 = Line(title_color="#C0C0C0")
line6.add(array[0] + "-" + "MA30", btime, ma30)
layout.add(line6)
layout.add(candlestick)
page.add(layout)
page.render()
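# Example invocation (hypothetical query strings and window size, added for illustration):
# each item is "<name>-<stock code>-<type>", e.g.
# graphpage(["SPDB-600000-Kline"], "2018/01/01", "2018/06/30", "D", 1200, 600)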
def generateline(stocknumber,Type,startdate,enddate,interval):
startdata = startdate.encode("ascii").replace("/","-").replace("\n","") #convert to tushare readable date
enddata = enddate.encode("ascii").replace("/","-").replace("\n","")
#print startdata
#print enddata
current_time = time.strftime("%Y/%m/%d")
if Type == "分笔".decode("utf-8"):
if startdate!=current_time:
array = ts.get_tick_data(stocknumber, date = startdata)#tick-by-tick data
if array is None:
return
array = array.sort_values("time")
date = array["time"].tolist()
amount = array["amount"].tolist()
atype = array["type"].tolist()
price = array["price"].tolist()
flag = ["bar" for i in date]
for idx,val in enumerate(atype):#if it is a sell order, make the traded amount negative
if val == "卖盘":
amount[idx] = -amount[idx]
if val == "中性盘":#if中性盘,则忽略. Might have a problem with this part??
amount[idx] = 0
returnarray = zip(date,amount,flag,price)
return returnarray
else:
array = ts.get_today_ticks(stocknumber)#Tushare requires today's ticks and historical ticks to be fetched separately
if array is None:
return
array = array.sort_values("time")
date = array["time"].tolist()
amount = array["amount"].tolist()
atype = array["type"].tolist()
flag = ["bar" for i in date]
for idx, val in enumerate(atype):
if val == "卖盘".decode("utf-8"):
amount[idx] = -amount[idx]
if val == "中性盘".decode("utf-8"):
amount[idx] = 0
returnarray = zip(date, amount, flag)
return returnarray
if Type=="季度饼图".decode("utf-8"):
datestr = startdate.split("/")
thisyear = datestr[0]
df2 = ts.top10_holders(code=stocknumber, gdtype="1")
test = df2[1]["quarter"].tolist()
df_ready = df2[1]
idxlist = []
for idx, val in enumerate(test):
a = val.split("-")
if a[0] == thisyear:
# print a[0],idx
idxlist.append(idx)
thing = df_ready.loc[idxlist]
thing = thing.sort_values(["quarter", "name"])
# print a[0],id
name = thing["name"].tolist()
value = thing["hold"].tolist()
quarter = thing["quarter"].tolist()
namearray = [name[i:i + 10] for i in xrange(0, len(name), 10)]
valuearray = [value[j:j + 10] for j in xrange(0, len(value), 10)]
quarterarray = [quarter[k:k + 10] for k in xrange(0, len(quarter), 10)]
flag = ["pie" for i in namearray]
num = [len(value) for k in namearray]
returnarray = zip(namearray,valuearray,quarterarray,flag,num)
return returnarray
if interval!="qfq" and interval!="hfq":
if interval=="1min" or interval=="5min" or interval=="15min" or interval=="30min" or interval=="60min":
df = ts.get_tick_data(stocknumber, date=startdata)
df.sort_values("time")
a = startdata + " " + df["time"]
df["time"] = a
df["time"] =
|
pd.to_datetime(a)
|
pandas.to_datetime
|
#python imports
import os
import gc
import string
import random
import time
import pickle
import shutil
from datetime import datetime
#internal imports
from modules.Signal import Signal
from modules.Database import Database
from modules.Predictor import Classifier, ComplexBuilder
from modules.utils import calculateDistanceP, chunks, cleanPath, minMaxNorm, extractMeanByBounds, extractMetricByShiftBounds
import joblib
from joblib import Parallel, delayed, dump, load
import pandas as pd
import numpy as np
from collections import OrderedDict
from itertools import combinations
from multiprocessing import Pool, Value
from joblib import wrap_non_picklable_objects
#plotting
import matplotlib.pyplot as plt
import seaborn as sns
#sklearn imports
from sklearn.metrics import classification_report, homogeneity_score, v_measure_score, completeness_score
from sklearn.model_selection import ParameterGrid
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import RadiusNeighborsRegressor, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale, minmax_scale, robust_scale
from scipy.stats import ttest_ind, f_oneway
#dimensional reduction
import umap
__VERSION__ = "0.4.48"
filePath = os.path.dirname(os.path.realpath(__file__))
pathToTmp = os.path.join(filePath,"tmp")
alignModels = { "LinearRegression": LinearRegression,
"RadiusNeighborsRegressor" : RadiusNeighborsRegressor,
"KNeighborsRegressor":KNeighborsRegressor
}
alignModelsParams = {
"LinearRegression": {},
"RadiusNeighborsRegressor" : {"weights":"distance","radius":30} ,
"KNeighborsRegressor":{"weights":"distance","n_neighbors":10}
}
STACKING_CLASSIFIER_GRID = {
'rf__max_depth': [70,None,30],#30,,
#'rf__max_features': ['auto'],
# 'rf__min_samples_leaf': [2, 3, 5],
'rf__min_samples_split': [2,4],#[2, 3, 4],
#'rf__n_estimators': [200],
"SVM__C" : [1, 10,1000],
"SVM__kernel": ['rbf','poly'],
'SVM__gamma': [0.01,10,100]
}
OPTICS_PARAM_GRID = {
"min_samples":[2,3,5,8],
"max_eps": [np.inf,2,1,0.9,0.8],
"xi": np.linspace(0,0.3,num=30),
"cluster_method" : ["xi"]
}
AGGLO_PARAM_GRID = {
"n_clusters":[None,115,110,105,100,90,95],
"distance_threshold":[None,0.5,0.4,0.2,0.1,0.05,0.01],
"linkage":["complete","single","average"]
}
AFF_PRO_PARAM = {"damping":np.linspace(0.5,1,num=50)}
HDBSCAN_PROPS = {
"min_cluster_size":[2,3,4,6],
"min_samples":[2,3,4,5]
}
#{"min_cluster_size":[2,3,4,6],"min_samples":[2,3,4,5,8,10]}
CLUSTER_PARAMS = {
"OPTICS":OPTICS_PARAM_GRID,
"AGGLOMERATIVE_CLUSTERING":AGGLO_PARAM_GRID,
"AFFINITY_PROPAGATION":AFF_PRO_PARAM,
"HDBSCAN":HDBSCAN_PROPS
}
svm_param_grid = {
'C': [1, 10, 100, 1000],
'kernel': ['linear','rbf','poly'],
'gamma': [0.01,0.1,1,2,3,4,5]
}
RF_GRID_SEARCH = {
'max_depth': [70,None,30,50,10],#30,,,50,5
'max_features': ['auto'],
'min_samples_leaf': [2,5,3,15], # 5, 15
'min_samples_split': [2 ,3,10],
'n_estimators': [300, 500, 600]
}
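# Note (added annotation): these grids are passed to the classifier via the
# 'classiferGridSearch' argument of ComplexFinder below and evaluated by k-fold
# cross validation; e.g. RF_GRID_SEARCH spans 5 * 1 * 4 * 3 * 3 = 180 parameter combinations.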
entriesInChunks = dict()
class ComplexFinder(object):
def __init__(self,
addImpurity = 0.0,
alignMethod = "RadiusNeighborsRegressor",#"RadiusNeighborsRegressor",#"KNeighborsRegressor",#"LinearRegression", # RadiusNeighborsRegressor
alignRuns = False,
alignWindow = 3,
allowSingleFractionQuant = False,
analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
analysisName = None,
binaryDatabase = False,
classifierClass = "random_forest",
classifierTestSize = 0.25,
classiferGridSearch = RF_GRID_SEARCH,#STACKING_CLASSIFIER_GRID,#
compTabFormat = False,
considerOnlyInteractionsPresentInAllRuns = 2,
correlationWindowSize = 5,
databaseFilter = {'Organism': ["Human"]},#{'Organism': ["Human"]},#{"Confidence" : [1,2,3,4]} - for hu.map2.0,# {} for HUMAN_COMPLEX_PORTAL
databaseIDColumn = "subunits(UniProt IDs)",
databaseFileName = "20190823_CORUM.txt",#"humap2.txt
databaseHasComplexAnnotations = True,
databaseEntrySplitString = ";",
decoySizeFactor = 1.2,
grouping = {"WT": ["D3_WT_03.txt"]},
hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
indexIsID = False,
idColumn = "Uniprot ID",
interactionProbabCutoff = 0.7,
justFitAndMatchPeaks = False,
keepOnlySignalsValidInAllConditions = False,
kFold = 3,
maxPeaksPerSignal = 15,
maxPeakCenterDifference = 1.8,
metrices = ["apex","pearson","euclidean","cosine","max_location","rollingCorrelation"], #"umap-dist"
metricesForPrediction = None,#["pearson","euclidean","apex"],
metricQuantileCutoff = 0.001,
minDistanceBetweenTwoPeaks = 3,
minimumPPsPerFeature = 6,
minPeakHeightOfMax = 0.05,
n_jobs = 12,
noDatabaseForPredictions = False,
normValueDict = {},
noDistanceCalculationAndPrediction = False,
peakModel = "LorentzianModel",#"GaussianModel",#"SkewedGaussianModel",#"LorentzianModel",
plotSignalProfiles = False,
plotComplexProfiles = False,
precision = 0.5,
r2Thresh = 0.85,
removeSingleDataPointPeaks = True,
restartAnalysis = False,
retrainClassifier = False,
recalculateDistance = False,
rollingWinType = None,
runName = None,
scaleRawDataBeforeDimensionalReduction = True,
smoothSignal = True,
smoothWindow = 2,
takeRondomSampleFromData =False,
topNCorrFeaturesForUMAPAlignment = 200,
TMTPoolMethod = "sum",
transformQuantDataBy = None,
useRawDataForDimensionalReduction = False,
useFWHMForQuant = True,
umapDefaultKwargs = {"min_dist":0.001,"n_neighbors":5,"n_components":2,"random_state":120},
quantFiles = [],
usePeakCentricFeatures = False
):
"""
Init ComplexFinder Class
Parameters
----------
* alignMethod = "RadiusNeighborsRegressor",
* alignRuns = False,
Alignment of runs is based on signal profiles that were found to have
a single modelled peak. A reference run is assigned by correlation analysis
and chosen based on a maximum R2 value. Then fraction shifts per signal
profile are calculated (must be in the window given by *alignWindow*).
The fraction residuals are then modelled using the method provided in
*alignMethod*. Model peak centers are then adjusted based on the regression results.
Of note, the alignment is performed after peak-modelling and before distance calculations.
* alignWindow = 3,
Number of fractions +/- the single-peak profile that are accepted for the run alignment.
* analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
* analysisName = None,
* binaryDatabase = False,
* classifierClass = "random_forest",
* classifierTestSize = 0.25,
Fraction of the created database containing positive and negative protein-protein
interactions that will be used for testing (for example ROC curve analysis) and classification report.
* classiferGridSearch = RF_GRID_SEARCH.
Dict with keywords matching parameters/settings of estimator (SVM, random forest)
and list of values forming the grid used to find the best estimator settings (evaluated
by k-fold cross validation). Runtime is affected by the number of parameter settings as well as by k-fold.
* compTabFormat = False
True indicates that the data are in the compTab data format which was recently introduced.
In contrast to standard txt files generated by, for example, MaxQuant, it contains multiple
headers. More information can be found here https://www3.cmbi.umcn.nl/cedar/browse/comptab
ComplexFinder will try to identify the samples and fractions and create separate txt files.
* considerOnlyInteractionsPresentInAllRuns = 2,
Can be either a bool to filter for protein - protein interactions that are present
in all runs, or an integer; then the pp interactions are filtered based on
the number of runs in which they were quantified. A value of 4 would indicate that
the pp interaction must have been predicted in all four runs.
* correlationWindowSize = 5,
Number of fractions used for rolling pearson correlation
* databaseFilter = {'Organism': ["Human"]},
Filter dict used to find relevant complexes from the database. By default,
the CORUM database is filtered based on the column 'Organism' using 'Human' as a search string.
If no filtering is required, pass an empty dict {}.
* databaseIDColumn = "subunits(UniProt IDs)",
* databaseFileName = "20190823_CORUM.txt",
* databaseHasComplexAnnotations = True,
Indicates if the provided database does contain complex annotations. If you have a database with
only pairwise interactions, this setting should be *False*. Clusters are identified by dimensional
reduction and density based clustering (HDBSCAN). In order to alter UMAP and HDBSCAN settings use the
kewywords *hdbscanDefaultKwargs* and *umapDefaultKwargs*.
* databaseEntrySplitString = ";",
String by which complex members are separated in the provided database. CORUM = ";", Embl ComplexMap = "|"
* decoySizeFactor = 1.2,
Size factor for creating the decoy database from the positive protein connectivity database such as CORUM.
* grouping = {"WT": ["D3_WT_04.txt","D3_WT_02.txt"],"KO":["D3_KO_01.txt","D3_KO_02.txt"]},
None or dict. Indicates which samples (file) belong to one group. Let's assume 4 files with the name
'KO_01.txt', 'KO_02.txt', 'WT_01.txt' and 'WT_02.txt' are being analysed.
The grouping dict should like this : {"KO":[KO_01.txt','KO_02.txt'],"WT":['WT_01.txt','WT_02.txt']}
in order to combine them for statistical testing (e.g. t-test of log2 transformed peak-AUCs).
Note that when analysing multiple runs (e.g. grouping present), calling ComplexFinder().run(X) requires X to be a
path to a folder containing the files.
When using compTabFormat = True. Provide the sample name as <compTabFileName>:<SampleName>.
* hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
* indexIsID = False,
* idColumn = "Uniprot ID",
* interactionProbabCutoff = 0.7
Cutoff for estimator probability. Interactions with probabilities below threshold will be removed.
* keepOnlySignalsValidInAllConditions = False
If True, removes all Signals that were not found to be valid in all files (experiments).
* kFold = 3
Cross validation of classifier optimization.
* justFitAndMatchPeaks = False
If true, the pipeline stops after peak detection/model fitting and matching of peaks (if more than one file is supplied.)
* maxPeaksPerSignal = 15
Number of peaks allowed for on signal profile.
* maxPeakCenterDifference = 1.8
* metrices = ["apex","pearson","euclidean","p_pearson","max_location","umap-dist","rollingCorrelation"], Metrices to access distance between two profiles. Can be either a list of strings and/or dict. In case of a list of dicts, each dict must contain the keywords: 'fn' and 'name' providing a callable function with 'fn' that returns a single floating number and takes two arrays as an input.
* metricesForPrediction = None
* metricQuantileCutoff = 0.90
* minDistanceBetweenTwoPeaks = 3
Distance in fractions (int) between two peaks. Setting this to a smaller number results in more peaks.
* n_jobs = 12,
Number of workers to model peaks, to calculate distance pairs and to train and use the classifer.
* noDatabaseForPredictions = False,
If you want to use ComplexFinder without any database. Set this to *True*.
* normValueDict = {},
* noDistanceCalculationAndPrediction = False,
Set to *True* to use ComplexFinder without distance calculation and database prediction.
* peakModel = "GaussianModel",
Indicates which model should be used to model signal profiles. In principle all models from lmfit can be used.
However, the initial parameters are only optimized for GaussianModel and LorentzianModel.
This might affect runtimes dramatically.
* plotSignalProfiles = False,
If True, each profile is plotted against the fractions along with the fitted models.
If you are concerned about runtime, you might set this to False at the cost of losing a visible assessment of the fit quality.
* plotComplexProfiles = False,
* precision = 0.5
Precision to use to filter protein-protein interactions.
If None, the filtering will be performed based on the parameter *interactionProbabCutoff*.
* r2Thresh = 0.85
R2 threshold to accept a model fit. Models below the threshold will be ignored.
* removeSingleDataPointPeaks = True,
* restartAnalysis = False, bool.
Set True if you want to restart the analysis from scratch. If the tmp folder exists, items and dirs will be deleted first.
* retrainClassifier = False,
If False and the trainedClassifier.sav file is found, the classifier is loaded and the training is skipped.
If you change the classiferGridSearch, you should set this to True.
This will ensure that the classifier training is never skipped.
* recalculateDistance = False,
* rollingWinType = None,
If None, all points are evenly weighted. Can be any string of scipy.signal window function.
(https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows)
* runName = None,
* <del>savePeakModels = True</del> *deprecated. Always True and will be removed in the next version*.
* scaleRawDataBeforeDimensionalReduction = True,
If raw data should be used (*useRawDataForDimensionalReduction*)
enable this if you want to scale them. Scaling will be performed that values of each row are scaled between zero and one.
* smoothSignal = True
Enable/disable smoothing. Defaults to True. A moving average of at least 3 adjacent datapoints is calculated using
the pandas rolling function. Affects the analysis time as well as the maximal number of peaks detected.
* smoothWindow = 2,
* topNCorrFeaturesForUMAPAlignment = 200,
Number of profiles used for UMAP Alignment. Only used if useRawDataForDimensionalReduction = True or noDistanceCalculationAndPrediction = True. The Features
will be identified by calculating the Pearson correlation coefficient.
* useRawDataForDimensionalReduction = False, Setting this to True will force the pipeline to use the raw values for dimensional reduction. Distance calculations are not automatically turned off and the output is generated, but they are not used.
* useFWHMForQuant = True
If quantFiles is specified, the FWHM will be used for peak centric quantification. By default, a mean over at least +/- one fraction around the peak center will
be considered (e.g. 3 fractions). However, you can allow single fraction quantification for narrow peaks by setting 'allowSingleFractionQuant' to True.
* umapDefaultKwargs = {"min_dist":0.0000001,"n_neighbors":3,"n_components":2},
If you want to perform an aligned UMAP consider altering the parameter alignment_window_size and alignment_regularisation. Find more information here
(https://umap-learn.readthedocs.io/en/latest/aligned_umap_basic_usage.html#aligning-varying-parameters)
* quantFiles = dict
* Quantification files. Dict with the name of the co-fractionation file as key and the path to the quantification file as value.
Assuming your grouping is something like: {"WT":["WT_01.txt","WT_02.txt"]}. Then the quantification files must
contain a key for each file: something like {"WT_01.txt":"myCoolProject/quant/WT01_quant.txt","WT_02.txt":"myCoolProject/quant/WT02_quant.txt"}.
Assuming the folder myCoolProject/ exists where the main file is.
If analysing a TMT-SILAC experiment it is required to provide TMT labelings for heavy and light peaks separately, the
provided dict should look something like this:
{
"HEAVY_WT_01.txt":"myCoolProject/quant/WT01_quant_heavy.txt",
"LIGHT_WT_01.txt":"myCoolProject/quant/WT01_quant_light.txt"
}
Returns
-------
None
"""
self.params = {
"addImpurity" : addImpurity,
"indexIsID" : indexIsID,
"idColumn" : idColumn,
"n_jobs" : n_jobs,
"kFold" : kFold,
"analysisName" : analysisName,
"restartAnalysis" : restartAnalysis,
"metrices" : metrices,
"peakModel" : peakModel,
"smoothWindow" : smoothWindow,
"classifierClass" : classifierClass,
"retrainClassifier" : retrainClassifier,
"interactionProbabCutoff":interactionProbabCutoff,
"maxPeaksPerSignal" : maxPeaksPerSignal,
"maxPeakCenterDifference" : maxPeakCenterDifference,
"classiferGridSearch" : classiferGridSearch,
"plotSignalProfiles" : plotSignalProfiles,
"savePeakModels" : True, #must be true to process pipeline, depracted, remove from class arguments.
"removeSingleDataPointPeaks" : removeSingleDataPointPeaks,
"grouping" : grouping,
"analysisMode" : analysisMode,
"normValueDict" : normValueDict,
"databaseFilter" : databaseFilter,
"databaseIDColumn" : databaseIDColumn,
"databaseFileName" : databaseFileName,
"databaseHasComplexAnnotations" : databaseHasComplexAnnotations,
"r2Thresh" : r2Thresh,
"smoothSignal" : smoothSignal,
"umapDefaultKwargs" : umapDefaultKwargs,
"hdbscanDefaultKwargs" : hdbscanDefaultKwargs,
"noDatabaseForPredictions" : noDatabaseForPredictions,
"alignRuns" : alignRuns,
"alignMethod" : alignMethod,
"runName" : runName,
"useRawDataForDimensionalReduction" : useRawDataForDimensionalReduction,
"scaleRawDataBeforeDimensionalReduction" : scaleRawDataBeforeDimensionalReduction,
"metricQuantileCutoff": metricQuantileCutoff,
"recalculateDistance" : recalculateDistance,
"metricesForPrediction" : metricesForPrediction,
"minDistanceBetweenTwoPeaks" : minDistanceBetweenTwoPeaks,
"minimumPPsPerFeature" : minimumPPsPerFeature,
"plotComplexProfiles" : plotComplexProfiles,
"decoySizeFactor" : decoySizeFactor,
"classifierTestSize" : classifierTestSize,
"considerOnlyInteractionsPresentInAllRuns" : considerOnlyInteractionsPresentInAllRuns,
"precision" : precision,
"quantFiles" : quantFiles,
"compTabFormat" : compTabFormat,
"correlationWindowSize" : correlationWindowSize,
"takeRondomSampleFromData" : takeRondomSampleFromData,
"minPeakHeightOfMax" : minPeakHeightOfMax,
"justFitAndMatchPeaks" : justFitAndMatchPeaks,
"keepOnlySignalsValidInAllConditions" : keepOnlySignalsValidInAllConditions,
"noDistanceCalculationAndPrediction" : noDistanceCalculationAndPrediction,
"topNCorrFeaturesForUMAPAlignment" : topNCorrFeaturesForUMAPAlignment,
"databaseEntrySplitString": databaseEntrySplitString,
"version" : __VERSION__,
"usePeakCentricFeatures" : usePeakCentricFeatures,
"allowSingleFractionQuant" : allowSingleFractionQuant,
"useFWHMForQuant" : useFWHMForQuant,
"TMTPoolMethod" : TMTPoolMethod,
"transformQuantDataBy" : transformQuantDataBy
}
print("\n" + str(self.params))
self._checkParameterInput()
def _addMetricesToDB(self,analysisName):
"""
Adds distance metrices to the database entries
that were found in the co-elution profiles.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping matching metrices to DB.")
return
if "signalDiff" in self.params["metrices"]:
self.params["metrices"] = [x for x in self.params["metrices"] if x != "signalDiff"] + ["{}-diff".format(x) for x in np.arange(self.Xs[analysisName].columns.size)]
metricColumns = self.params["metrices"]
if not self.params["noDatabaseForPredictions"]:
self.DB.matchMetrices(self.params["pathToTmp"][analysisName],entriesInChunks[analysisName],metricColumns,analysisName,forceRematch=self.params["recalculateDistance"])
def _addMetricToStats(self,metricName, value):
"""
Adds a metric to the stats data frame.
Does not check if the metric is already present; if it is,
it will just be overwritten.
Parameters
----------
metricName str
Name of metric to add
value str
Value of metric
Returns
-------
None
"""
if metricName in self.stats.columns:
self.stats.loc[self.currentAnalysisName,metricName] = value
def _addModelToSignals(self,signalModels):
"""
Adds fitted models to Signals. If no valid
model was found, the signal profile is removed.
Parameters
----------
signalModels - list
List of modelfits (dict)
Returns
-------
None
"""
for fitModel in signalModels:
modelID = fitModel["id"]
if len(fitModel) == 1:
del self.Signals[self.currentAnalysisName][modelID]
if modelID in self.Signals[self.currentAnalysisName]:
for k,v in fitModel.items():
if k != 'id':
setattr(self.Signals[self.currentAnalysisName][modelID],k,v)
self.Signals[self.currentAnalysisName][modelID].saveResults()
def _attachQuantificationDetails(self, combinedPeakModels = None):
"""
"""
if self.params["analysisMode"] == "label-free":
if len(self.params["quantFiles"]) != 0:
print("Warning :: Quant files have been specified but anaylsis mode is label-free. Please define SILAC or TMT or SILAC-TMT")
print("Info :: Label-free mode selected. No additation quantification performed..")
return
if len(self.params["quantFiles"]) > 0:
files = np.array(list(self.params["grouping"].values())).flatten()
print(files)
print(self.params["quantFiles"].keys())
if len(self.params["quantFiles"]) != files.size and self.params["analysisMode"] != "SILAC-TMT":
print("Warning :: Different number of quantFiles and groupings provided.")
if self.params["analysisMode"] != "SILAC-TMT":
initFilesFound = [k for k in self.params["quantFiles"].keys() if k in files]
else:
print(self.params["quantFiles"])
for k in self.params["quantFiles"].keys():
print(k.split("HEAVY_",maxsplit=1))
initFilesFound = [k for k in self.params["quantFiles"].keys() if k.split("HEAVY_",maxsplit=1)[-1] in files or k.split("LIGHT_",maxsplit=1)[-1] in files]
print("Info :: For the following files and correpsonding co-elution profile data was detected")
print(initFilesFound)
print("Warning :: other files will be ignored.")
# elif self.params["analysisMode"] == "SILAC-TMT":
# if not all(f.startswith("HEAVY") or f.startswith("LIGHT") for f in self.params["quantFiles"].keys()):
# print("Warning :: If using a SILAC-TMT experiment, please provide 'HEAVY' and 'LIGHT' before the file in the dict 'quantFile' such as 'HEAVY_WT_01.txt':<path to quant file> as well as 'LIGHT_WT_01.txt':<path to quant file>")
print("combining Peaks!!")
if combinedPeakModels is None:
## load combined peak results
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResults.txt")
if os.path.exists(txtOutput):
combinedPeakModels = pd.read_csv(txtOutput,sep="\t")
else:
print("Warning :: Combined peak model reuslts not found. Deleted? Skipping peak centric quantification.")
return
print("Info :: Starting peak centric quantification. In total {} peaks were found".format(combinedPeakModels.index.size))
print("Info :: Loading quantification files.")
if not all(os.path.exists(pathToQuantFile) for pathToQuantFile in self.params["quantFiles"].values()):
print("Warning :: Not all quant files found!")
if self.params["analysisMode"] != "SILAC-TMT":
print(self.params["quantFiles"].values())
path = list(self.params["quantFiles"].values())
print(os.path.abspath(path[0]))
quantFilesLoaded = [(k,pd.read_csv(v,sep="\t",index_col = 0),False) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
else:
quantFilesLoaded = [(k.split("HEAVY_",maxsplit=1)[-1] if "HEAVY" in k else k.split("LIGHT_",maxsplit=1)[-1],pd.read_csv(v,sep="\t",index_col = 0),"LIGHT" in k) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
if len(quantFilesLoaded) == 0:
print("Warning :: No quant files found. Skipping peak-centric quantification.")
return
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: Assuming one SILAC ratio per fraction .")
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: Assuming the following order:")
print("Ignoring column headers, just uses the column index as follow..")
print("Fraction 1 - TMT reporter 1, Fraction 1 - TMT reporter 2, Faction 2 - TMT reporter 3 .... Fraction 2 - TMT reporter 1")
extractedQuantFiles = []
for k,quantFile,isLightQuantData in quantFilesLoaded:
print("Info :: Quantification of ", k)
centerColumnName = "Center_{}".format(k)
fwhmColumnName = "fwhm_{}".format(k)
quantFileName = "Q({})".format(k)
combinedPeakModelsFiltered = combinedPeakModels.dropna(subset=[centerColumnName])
lowerBound = combinedPeakModelsFiltered[centerColumnName] - combinedPeakModelsFiltered[fwhmColumnName]/1.7
upperBound = combinedPeakModelsFiltered[centerColumnName] + combinedPeakModelsFiltered[fwhmColumnName]/1.7
peakBounds = np.concatenate([lowerBound.values.reshape(-1,1),upperBound.values.reshape(-1,1)],axis=1)
peakBounds[:,1] += 1 #add one extra to use bounds as a range in python
#check bounds
peakBounds[peakBounds[:,0] < 0, 0] = 0
peakBounds[peakBounds[:,1] >= quantFile.columns.size, 1] = quantFile.columns.size - 1
#transform bounds to ints
peakBounds = np.around(peakBounds,0).astype(np.int64)
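# Illustrative example (assumed numbers, not taken from real data): a peak with
# Center = 10.0 and fwhm = 3.4 yields lowerBound = 8.0 and upperBound = 12.0;
# after the +1 range adjustment and rounding, the peakBounds row becomes [8, 13],
# i.e. quantData columns 8..12 are used for this peak.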
quantData = quantFile.loc[combinedPeakModelsFiltered["Key"].values].values
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: extracting mean from file {}.".format(k))
out = extractMeanByBounds(
NPeakModels = combinedPeakModelsFiltered.index.size,
peakBounds = peakBounds,
quantData = quantData
)
quantColumnNames = ["SILAC({})_Mean".format(quantFileName),"SILAC({})_Error".format(quantFileName)]
print(out)
print(quantColumnNames)
dfResult = pd.DataFrame(out,index=combinedPeakModelsFiltered.index, columns = quantColumnNames)
dfResult = dfResult.join(pd.DataFrame(peakBounds,index=combinedPeakModelsFiltered.index, columns = ["SILAC({})_LowerBound".format(quantFileName),"SILAC({})_UpperBound".format(quantFileName)]))
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: extracting sum from TMT reporters using file {}".format(self.params["quantFiles"][k]))
print("Info :: Detecting reporter channles..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
print("Warning :: Could not detect the number of TMT reporter channles. Please check columns in quantFiles to have nTMTx x fractions columns")
continue
nTMTs = int(nTMTs)
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
quantColumnNames = []
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "SILAC-TMT":
print("Info :: Extracting quantification details from SILAC-TMT data.")
print("Info :: Detecting reporter channles..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
print("Warning :: Could not detect the number of TMT reporter channles. Please check columns in quantFiles to have nTMTx x fractions columns")
continue
nTMTs = int(nTMTs)
# print(peakBounds)
# print(combinedPeakModels["Key"])
# print(isLightQuantData)
quantData[quantData == 0.0] = np.nan
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
#print(out)
if isLightQuantData:
quantColumnNames = ["L_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
else:
quantColumnNames = ["H_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
# print(a)
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
combinedPeakModels = combinedPeakModels.join(extractedQuantFiles)
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResultsQuant.txt")
combinedPeakModels.to_csv(txtOutput,sep="\t")
def _checkParameterInput(self):
"""
Checks the input.
Parameters
----------
Returns
-------
None
Raises
-------
ValueError if the datatypes of the given parameters do not match.
"""
#check analysis mode
validModes = ["label-free","SILAC","SILAC-TMT","TMT"]
if self.params["analysisMode"] not in validModes:
raise ValueError("Parmaeter analysis mode is not valid. Must be one of: {}".format(validModes))
elif self.params["analysisMode"] != "label-free" and len(self.params["quantFiles"]) == 0:
raise ValueError("Length 'quantFiles must be at least 1 if the analysis mode is not set to 'label-free'.")
if not isinstance(self.params["maxPeaksPerSignal"],int):
raise ValueError("maxPeaksPerSignal must be an integer. Current setting: {}".forma(self.params["maxPeaksPerSignal"]))
elif self.params["maxPeaksPerSignal"] <= 2:
raise ValueError("maxPeaksPerSignal must be greater than or equal 2")
elif self.params["maxPeaksPerSignal"] > 20:
print("Warning :: maxPeaksPerSignal is set to above 20, this may take quite long to model.")
#r2 validation
if not isinstance(self.params["r2Thresh"],float):
raise ValueError("Parameter r2Trehsh mus be a floating number.")
elif self.params["r2Thresh"] < 0.5:
print("Warning :: threshold for r2 is set below 0.5. This might result in fits of poor quality")
elif self.params["r2Thresh"] > 0.95:
print("Warning :: threshold for r2 is above 0.95. Relatively few features might pass this limit.")
elif self.params["r2Thresh"] > 0.99:
raise ValueError("Threshold for r2 was above 0.99. Please set a lower value.")
#minPeakHeightOfMax
if not isinstance(self.params["minPeakHeightOfMax"],float) and self.params["minPeakHeightOfMax"] < 1 and self.params["minPeakHeightOfMax"] >= 0:
raise ValueError("Parameter 'minPeakHeightOfMax' must be a float smaller than 1.0 and greather/equal 0.0.")
#k-fold
if not isinstance(self.params["kFold"],int):
raise ValueError("Parameter kFold mus be an integer.")
elif self.params["kFold"] < 2:
raise ValueError("Parameter kFold must be at least 2.")
if self.params["alignMethod"] not in alignModels:
raise ValueError("Parameter alignMethod must be in {}".format(alignModels.values()))
if not isinstance(self.params["metricQuantileCutoff"],float) or self.params["metricQuantileCutoff"] <= 0 or self.params["metricQuantileCutoff"] >= 1:
raise ValueError("Parameter metricQuantileCutoff must be a float greater than 0 and smaller than 1.")
#add database checks
if self.params["metricesForPrediction"] is not None:
if not isinstance(self.params["metricesForPrediction"],list):
raise TypeError("metricesForPrediction must be a list.")
else:
if not all(x in self.params["metrices"] for x in self.params["metricesForPrediction"]):
raise ValueError("All metrices given in 'metricesForPrediction' must be present in 'metrices'.")
else:
self.params["metricesForPrediction"] = self.params["metrices"]
def _chunkPrediction(self,pathToChunk,classifier,nMetrices,probCutoff):
"""
Predicts for each chunk the proability for positive interactions.
Parameters
----------
pathToChunk : str
classifier : classfierClass
Trained classifier.
nMetrices : int
Number of metrices used. (since chunks are simple numpy arrays, no column headers are loaded)
probCutoff : float
Probability cutoff.
Returns
-------
Numpy array. Chunks with appended probability.
"""
X = np.load(pathToChunk,allow_pickle=True)
boolSelfIntIdx = X[:,0] != X[:,1]
X = X[boolSelfIntIdx]
classProba = classifier.predict(X[:,[n+3 for n in range(nMetrices)]])
#boolPredIdx = classProba >= probCutoff
#boolIdx = np.sum(boolPredIdx,axis=1) > 0
predX = np.append(X[:,2],classProba.reshape(X.shape[0],-1),axis=1)
np.save(
file = pathToChunk,
arr = predX)
return predX
def _load(self, X):
"""
Initializes the data.
Parameters
----------
X pd.DataFrame
Returns
-------
None
Raises
-------
ValueError if X is not a pandas data frame.
"""
if isinstance(X, pd.DataFrame):
self.X = X
if not self.params["indexIsID"]:
print("Info :: Checking for duplicates")
dupRemoved = self.X.drop_duplicates(subset=[self.params["idColumn"]])
if dupRemoved.index.size < self.X.index.size:
print("Warning :: Duplicates detected.")
print("File contained duplicate ids which will be removed: {}".format(self.X.index.size-dupRemoved.index.size))
self.X = dupRemoved
self.X = self.X.set_index(self.params["idColumn"])
self.X = self.X.astype(np.float32)
else:
self.X = self.X.loc[self.X.index.drop_duplicates()] #remove duplicates
self.X = self.X.astype(np.float32) #set dtype to 32 to save memory
if self.params["takeRondomSampleFromData"] != False and self.params["takeRondomSampleFromData"] > 50:
self.X = self.X.sample(self.params["takeRondomSampleFromData"])
print("Random samples taken from data. New data size {}".format(self.X.index.size))
self.params["rawData"][self.currentAnalysisName] = self.X.copy()
else:
raise ValueError("X must be a pandas data frame")
def _loadReferenceDB(self):
"""
Load reference database.
filterDB (dict) is passed to the pandas pd.DataFrame.isin function.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("noDistanceCalculationAndPrediction was enabled. No database laoded.")
return
if self.params["noDatabaseForPredictions"]:
print("Info :: Parameter noDatabaseForPredictions was set to True. No database laoded.")
return
print("Info :: Load positive set from data base")
if not hasattr(self,"DB"):
self.DB = Database(nJobs = self.params["n_jobs"], splitString=self.params["databaseEntrySplitString"])
pathToDatabase = os.path.join(self.params["pathToComb"], "InteractionDatabase.txt")
if os.path.exists(pathToDatabase):
dbSize = self.DB.loadDatabaseFromFile(pathToDatabase)
print("Info :: Database found and loaded. Contains {} positive interactions.".format(dbSize))
# self._addMetricToStats("nPositiveInteractions",dbSize)
else:
self.DB.pariwiseProteinInteractions(
self.params["databaseIDColumn"],
dbID = self.params["databaseFileName"],
filterDb=self.params["databaseFilter"])
entryList = []
for analysisName in self.params["analysisName"]:
entryList.extend([entryID for entryID,Signal in self.Signals[analysisName].items() if Signal.valid])
entryList = np.unique(np.array(entryList).flatten())
print("Info :: Features used for filtering: {}".format(len(entryList)))
dbSize = self.DB.filterDBByEntryList(entryList)
#add decoy to db
if dbSize == 0:
raise ValueError("Warning :: No hits found in database. Check dabaseFilter keyword.")
elif dbSize < 150:
raise ValueError("Warining :: Less than 150 pairwise interactions found.")
elif dbSize < 200:
#raise ValueError("Filtered positive database contains less than 200 interactions..")
print("Warning :: Filtered positive database contains less than 200 interactions.. {}".format(dbSize))
print("Warning :: Please check carefully, if the classifier has enough predictive power.")
self.DB.addDecoy(sizeFraction=self.params["decoySizeFactor"])
self.DB.df.to_csv(pathToDatabase,sep="\t")
print("Info :: Database saved to {}".format(pathToDatabase))
def _checkGroups(self):
"Checks grouping. For comparision of multiple co-elution data sets."
if isinstance(self.params["grouping"],dict):
if len(self.params["grouping"]) == 0:
raise ValueError("Example for grouping : {'KO':['KO_01.txt','KO_02.txt'], 'WT':['WT_01.txt','WT_02.txt'] } Aborting.. ")
else:
combinedSamples = sum(self.params["grouping"].values(), [])
if all(x in combinedSamples for x in self.params["analysisName"]):
print("Grouping checked..\nAll columnSuffixes found in grouping.")
print("If you are using the combat format, the grouping has to be named as '<combatFileName><sample name>")
else:
raise ValueError("Could not find all grouping names in loaded dataframe.. Aborting ..")
def _findPeaks(self, n_jobs=3):
"""
Initiates for each feature in the data a Signal instance.
Peak detection and modelling is then performed.
Results are saved to hard drive for each run.
Numerous parameters affect signal modelling (smoothing, maxPeaks, r2Thresh, ...)
Create self.Signals (OrderedDict) which is a dict. Key = analysisName, which
contains another dict with entries as keys and values are of type Signal class.
Parameters
----------
n_jobs int. Number of worker processes.
Returns
-------
None
"""
if self.allSamplesFound:
print("Info :: Signals loaded and found. Proceeding ...")
return
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
if os.path.exists(pathToSignal):
self.Signals = load(pathToSignal)
print("\nLoading pickled signal intensity")
if all(analysisName in self.Signals for analysisName in self.params["analysisName"]):
print("Info :: All samples found in loaded Signals..")
self.allSamplesFound = True
return
if not hasattr(self , "Signals"):
self.Signals = OrderedDict()
self.Signals[self.currentAnalysisName] = dict()
peakModel = self.params['peakModel']
for entryID, signal in self.X.iterrows():
self.Signals[self.currentAnalysisName][entryID] = Signal(signal.values,
ID = entryID,
peakModel = peakModel,
smoothSignal = self.params["smoothSignal"],
savePlots = self.params["plotSignalProfiles"],
savePeakModels = self.params["savePeakModels"],
maxPeaks = self.params["maxPeaksPerSignal"],
metrices = self.params["metrices"],
pathToTmp = self.params["pathToTmp"][self.currentAnalysisName],
normalizationValue = self.params["normValueDict"][entryID] if entryID in self.params["normValueDict"] else None,
removeSingleDataPointPeaks = self.params["removeSingleDataPointPeaks"],
analysisName = self.currentAnalysisName,
r2Thresh = self.params["r2Thresh"],
smoothRollingWindow = self.params["smoothWindow"],
minDistanceBetweenTwoPeaks = self.params["minDistanceBetweenTwoPeaks"],
minPeakHeightOfMax = self.params["minPeakHeightOfMax"])
t1 = time.time()
print("\n\nStarting Signal modelling .. (n_jobs = {})".format(n_jobs))
fittedModels = Parallel(n_jobs=n_jobs, verbose=1)(delayed(Signal.fitModel)() for Signal in self.Signals[self.currentAnalysisName].values())
self._addModelToSignals(fittedModels)
self._saveSignalFitStatistics()
print("Peak fitting done time : {} secs".format(round((time.time()-t1))))
print("Each feature's fitted models is stored as pdf and txt is stored in model plots (if savePeakModels and plotSignalProfiles was set to true)")
def _saveSignals(self):
""
if hasattr(self,"Signals") :
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
dump(self.Signals.copy(),pathToSignal)
self.Xs = OrderedDict()
for analysisName in self.params["analysisName"]:
pathToFile = os.path.join(self.params["pathToTmp"][analysisName],"validProcessedSignals({}).txt".format(analysisName))
signals = self.Signals[analysisName]
validSignalData = dict([(k,v.Y) for k,v in signals.items() if v.valid and v.validModel])
fitDataSignal = dict([(k,v.fitSignal.flatten()) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
dfProcessedSignal = pd.DataFrame().from_dict(validSignalData,orient="index")
dfFit = pd.DataFrame().from_dict(fitDataSignal, orient="index")
if self.params["removeSingleDataPointPeaks"]:
numberofPeaks = dict([(k,v.removedDataPoints) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
nRemovedData = pd.DataFrame().from_dict(numberofPeaks,orient="index")
nRemovedData.columns = ["#removedDataPoints"]
dfFit = dfFit.join(nRemovedData)
#print(self.params["rawData"][analysisName].index)
df = dfProcessedSignal.join(self.params["rawData"][analysisName],rsuffix="_raw",lsuffix="_processed")
df = df.join(dfFit,rsuffix = "_fit")
df.to_csv(pathToFile,sep="\t")
self.Xs[analysisName] = dfProcessedSignal
X = self.Xs[analysisName].reset_index()
np.save(os.path.join(self.params["pathToTmp"][analysisName],"source.npy"),X.values)
for analysisName in self.params["analysisName"]:
#clean invalid signals
if self.params["keepOnlySignalsValidInAllConditions"]:
toDelete = [k for k,v in self.Signals[analysisName].items() if not all(k in self.Signals[analysisName] and self.Signals[analysisName][k].valid for analysisName in self.params["analysisName"])]
else:
toDelete = [k for k,v in self.Signals[analysisName].items() if not v.valid]
#delete Signals that do match criteria
for k in toDelete:
del self.Signals[analysisName][k]
def _calculateDistance(self):
"""
Calculates Distance between protein protein pairs based
on their signal profile.
Parameters
----------
signalModels - list
List of modelfits (dict)
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("noDistanceCalculationAndPrediction was enabled. Skipping Distance Calculations.")
return
global entriesInChunks
print("\nStarting Distance Calculation ...")
t1 = time.time()
chunks = self.signalChunks[self.currentAnalysisName]
#return
entrieChunkPath = os.path.join(self.params["pathToComb"], "entriesInChunk.pkl")
if not self.params["recalculateDistance"] and all(os.path.exists(x.replace(".pkl",".npy")) for x in chunks) and os.path.exists(entrieChunkPath):
print("All chunks found for distance calculation.")
if not self.entriesInChunkLoaded:
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"rb") as f:
entriesInChunks = pickle.load(f)
self.entriesInChunkLoaded = True
else:
chunkItems = Parallel(n_jobs=self.params["n_jobs"], verbose=10)(delayed(calculateDistanceP)(c) for c in chunks)
entriesInChunks[self.currentAnalysisName] = {}
for k,v in chunkItems:
for E1E2 in v:
entriesInChunks[self.currentAnalysisName][E1E2] = k
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"wb") as f:
pickle.dump(entriesInChunks,f)
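# Illustrative note (hypothetical IDs): entriesInChunks ends up as a nested mapping from
# run name to pair key ("E1E2") to the name of the chunk that pair was written to, e.g.
#   entriesInChunks["run1"] == {"P12345Q67890": "3", ...}
# so a pair's distance row can later be located without scanning every chunk file.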
print("Distance computing/checking: {} secs\n".format(round(time.time()-t1)))
def _createSignalChunks(self,chunkSize = 30):
"""
Creates signal chunks at given chunk size.
Parameter
---------
chunkSize - int. default 30. Number of signals in a single chunk.
Returns
-------
list of paths to the saved chunks.
"""
pathToSignalChunk = os.path.join(self.params["pathToComb"],"signalChunkNames.lzma")
if os.path.exists(pathToSignalChunk) and not self.params["recalculateDistance"]:
self.signalChunks = load(pathToSignalChunk)
print("Info :: Signal chunks loaded and found. Checking if all runs are present.")
if all(analysisName in self.signalChunks for analysisName in self.params["analysisName"]):
print("Info :: Checked... all samples found.")
return
else:
print("Info :: Not all samples found. Creating new signal chunks..")
if not hasattr(self,"signalChunks"):
self.signalChunks = dict()
else:
self.signalChunks.clear()
for analysisName in self.params["analysisName"]:
print("Info :: {} signal chunk creation started.\nThis may take some minutes.." .format(analysisName))
if "umap-dist" in self.params["metrices"]:
#umap dist calculations
print("Info :: Calculation UMAP.")
embed = umap.UMAP(min_dist=0.0000000000001, n_neighbors=5, metric = "correlation", random_state=56).fit_transform(minMaxNorm(self.Xs[analysisName].values,axis=1))
embed = pd.DataFrame(embed,index=self.Xs[analysisName].index)
#save embedding
embed.to_csv(os.path.join(self.params["pathToTmp"][analysisName],"chunks","embeddings.txt"),sep="\t")
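# Note: this embedding is only computed when "umap-dist" is among the requested metrices.
# Its rows share the index of self.Xs[analysisName], and each signal's own coordinates are
# attached to its chunk item below (key "embedding"), so the UMAP-based distance can be
# evaluated per pair during the chunk-wise distance calculation.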
signals = list(self.Signals[analysisName].values())
for n,Signal in enumerate(self.Signals[analysisName].values()):
setattr(Signal,"otherSignals", signals[n:])
c = []
for n,chunk in enumerate(chunks(signals,chunkSize)):
pathToChunk = os.path.join(self.params["pathToTmp"][analysisName],"chunks",str(n)+".pkl")
#if not os.path.exists(pathToChunk) and not self.params["recalculateDistance"]:
chunkItems = [
{
"ID" : str(signal.ID),
"chunkName" : str(n),
"Y" : np.array(signal.Y),
"ownPeaks" : signal.getPeaksAndsIDs(),
"otherSignalPeaks" : [s.getPeaksAndsIDs() for s in signal.otherSignals],
"E2" : [str(s.ID) for s in signal.otherSignals],
"metrices" : self.params["metrices"],
"pathToTmp" : self.params["pathToTmp"][analysisName],
"correlationWindowSize" : self.params["correlationWindowSize"],
"embedding" : embed.loc[signal.ID].values if "umap-dist" in self.params["metrices"] else [],
} for signal in chunk]
with open(pathToChunk,"wb") as f:
pickle.dump(chunkItems,f)
c.append(pathToChunk)
self.signalChunks[analysisName] = [p for p in c if os.path.exists(p)] #
#save signal chunk names.
dump(self.signalChunks,pathToSignalChunk)
def _collectRSquaredAndFitDetails(self):
"""
Data are collected from txt files in the modelPlots folder.
"""
if not self.params["savePeakModels"]:
print("!! Warning !! This parameter is depracted and from now on always true.")
self.params["savePeakModels"] = True
pathToPlotFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result","modelPlots")
resultFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
fittedPeaksPath = os.path.join(resultFolder,"fittedPeaks_{}.txt".format(self.currentAnalysisName))
nPeaksPath = os.path.join(resultFolder,"nPeaks.txt")
if os.path.exists(fittedPeaksPath) and os.path.exists(nPeaksPath):
print("Warning :: FittedPeaks detected. If you changed the data, you have to set the paramter 'restartAnalysis' True to include changes..")
return
if not os.path.exists(resultFolder):
os.mkdir(resultFolder)
#number of peaks
collectNumbPeaks = []
data = [{"Key":signal.ID,
"ID" : n,
"R2":signal.Rsquared,
"#Peaks":len(signal.modelledPeaks),
"Center":peakParam["mu"],
"Amplitude":peakParam["A"],
"Sigma":peakParam["sigma"],
"fwhm":peakParam["fwhm"],
"height" : peakParam["height"],
"AUC" : peakParam["AUC"],
"relAUC" : peakParam["relAUC"],
"validModel":signal.validModel,
"validData":signal.validData,
"Y": ",".join([str(round(x,3)) for x in peakParam["Y"]])} for signal in self.Signals[self.currentAnalysisName].values() if signal.valid for n,peakParam in enumerate(signal.modelledPeaks)]
df = pd.DataFrame().from_dict(data)
df.to_csv(fittedPeaksPath,sep="\t",index=None)
# # find peak properties..
# df = pd.DataFrame(columns=["Key","ID","Amplitude","Center","Sigma","fwhm","height","auc"])
# for file in os.listdir(pathToPlotFolder):
# if file.endswith(".txt"):
# try:
# dfApp = pd.read_csv(os.path.join(pathToPlotFolder,file), sep="\t")
# df = df.append(dfApp)
# collectNumbPeaks.append({"Key":dfApp["Key"].iloc[0],"N":len(dfApp.index)})
# except:
# continue
#pd.DataFrame(collectNumbPeaks).to_csv(nPeaksPath,sep="\t", index = None)
def _trainPredictor(self, addImpurity = 0.3, apexTraining = False):
"""
Trains the predictor based on positive interactions
in the database.
Parameters
----------
Returns
-------
None
"""
#metricColumns = [col for col in self.DB.df.columns if any(x in col for x in self.params["metrices"])]
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Predictor training skipped (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True). Distance metrices/Raw signals are used for dimensional reduction.")
return
folderToResults = [os.path.join(self.params["pathToTmp"][analysisName],"result") for analysisName in self.params["analysisName"]]
classifierFileName = os.path.join(self.params["pathToComb"],'trainedClassifier_{}.sav'.format(self.params["classifierClass"]))
if not self.params["retrainClassifier"] and os.path.exists(classifierFileName): #enumerate(
print("Info :: Prediction was done already... loading file")
self.classifier = joblib.load(classifierFileName)
return
metricColumnsForPrediction = self.params["metrices"]
totalColumns = metricColumnsForPrediction + ['Class',"E1E2"]
data = [self.DB.dfMetrices[analysisName][totalColumns].dropna(subset=metricColumnsForPrediction) for analysisName in self.params["analysisName"]]
data = pd.concat(data, ignore_index=True)
dataForTraining = data[["E1E2","Class"] + metricColumnsForPrediction]
dataForTraining["Class"] = dataForTraining["Class"].astype(np.float64)
print("Info :: Merging database metrices.")
print("Test size for classifier: {}".format(self.params["classifierTestSize"]))
if apexTraining and "apex" in totalColumns:
print("Info :: Performing apex based pooling.")
dataForTraining = dataForTraining.sort_values("apex").drop_duplicates("E1E2")
else:
dataForTraining = dataForTraining.groupby(dataForTraining['E1E2']).aggregate("min")
dataForTraining['Class'] = dataForTraining['Class'].astype(np.int64)
dataForTraining = dataForTraining.reset_index()
print("Info :: Using a total of {} features for classifier training.".format(dataForTraining.index.size))
if addImpurity > 0:
nRows = dataForTraining.index.size
rowIdx = np.random.choice(nRows,int(nRows * addImpurity),replace=False)#np.random.randint(0,nRows,size=int(nRows * addImpurity))
print(dataForTraining.loc[rowIdx,'Class'] ^ 1)
dataForTraining.loc[rowIdx,'Class'] = dataForTraining.loc[rowIdx,'Class'] ^ 1
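# Note: XOR with 1 flips a binary label in place (0 ^ 1 == 1, 1 ^ 1 == 0), so the randomly
# chosen subset of size nRows * addImpurity gets its class inverted, e.g.
#   np.array([0, 1, 1]) ^ 1  ->  array([1, 0, 0])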
print("Warning :: Stop! Using impurity for the training data is not advisable other than for testing. You should probably not do this?")
Y = dataForTraining['Class'].values
X = dataForTraining.loc[:,metricColumnsForPrediction].values
self.classifier = Classifier(
classifierClass = self.params["classifierClass"],
n_jobs=self.params['n_jobs'],
gridSearch = self.params["classiferGridSearch"],
testSize = self.params["classifierTestSize"])
probabilites, meanAuc, stdAuc, oobScore, optParams, Y_test, Y_pred = self.classifier.fit(X,Y,kFold=self.params["kFold"],pathToResults=self.params["pathToComb"], metricColumns = metricColumnsForPrediction)
dataForTraining["PredictionClass"] = probabilites
#save prediction summary
pathToFImport = os.path.join(self.params["pathToComb"],"PredictorSummary{}_{}.txt".format(self.params["metrices"],self.params["addImpurity"]))
#create and save classification report
classReport = classification_report(
Y_test,
Y_pred,
digits=3,
output_dict=True)
classReport = OrderedDict([(k,v) for k,v in classReport.items() if k != 'accuracy'])
pd.DataFrame().from_dict(classReport, orient="index").to_csv(pathToFImport, sep="\t", index=True)
#save database prediction
dataForTraining.to_csv(os.path.join(self.params["pathToComb"],"DBpred({}).txt".format(self.params["addImpurity"])),sep="\t", index=False)
self._plotFeatureImportance(self.params["pathToComb"])
joblib.dump(self.classifier, classifierFileName)
self._addMetricToStats("Metrices",str(metricColumnsForPrediction))
self._addMetricToStats("OOB_Score",oobScore)
self._addMetricToStats("ROC_Curve_AUC","{}+-{}".format(meanAuc,stdAuc))
self._addMetricToStats("ClassifierParams",optParams)
print("DB prediction saved - DBpred.txt :: Classifier pickled and saved 'trainedClassifier.sav'")
def _loadPairsForPrediction(self):
""
#load chunks that were saved
chunks = [f for f in os.listdir(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks")) if f.endswith(".npy") and f != "source.npy"]
print("\nInfo :: Prediction/Dimensional reduction started...")
for chunk in chunks:
X = np.load(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks",chunk),allow_pickle=True)
yield (X,len(chunks))
def _predictInteractions(self):
""
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping predictions. (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True)")
return
paramDict = {"NumberInteractions" : 0, "positiveInteractors" : 0, "decoyInteractors" : 0, "novelInteractions" : 0, "interComplexInteractions" : 0}
probCutoffs = dict([(cutoff,paramDict.copy()) for cutoff in np.linspace(0.0,0.99,num=30)])
print("Info :: Starting prediction ..")
folderToOutput = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
pathToPrediction = os.path.join(folderToOutput,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if False and not self.params["retrainClassifier"] and os.path.exists(pathToPrediction):
predInts = pd.read_csv(pathToPrediction, sep="\t")
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = predInts.index.size
return predInts
# del self.Signals
#gc.collect()
#create prob columns of k fold
pColumns = ["Prob_{}".format(n) for n in range(len(self.classifier.predictors))]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]] + pColumns + ["In DB"]
if not os.path.exists(folderToOutput):
os.mkdir(folderToOutput)
predInteractions = None
metricIdx = [n + 4 if "apex" in self.params["metrices"] else n + 3 for n in range(len(self.params["metrices"]))] #in order to extract the metric columns from the distance arrays; apex creates an extra column (apex_dist)
for n,(X,nChunks) in enumerate(self._loadPairsForPrediction()):
boolSelfIntIdx = X[:,0] == X[:,1]
if n % 5 == 0:
percDone = round(n/nChunks*100,1)
print(percDone,r"%")
X = X[boolSelfIntIdx == False]
#first two rows E1 E2, and E1E2, apexPeakDist remove before predict
if X.shape[0] > 0:
classProba = self.classifier.predict(X[:,metricIdx])
else:
continue
if classProba is None:
continue
predX = np.append(X,classProba.reshape(X.shape[0],-1),axis=1)
interactionClass = self.DB.getInteractionClassByE1E2(X[:,2],X[:,0],X[:,1])
for cutoff in probCutoffs.keys():
boolPredIdx = classProba >= cutoff
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
counts = interactionClass.loc[boolIdx].value_counts()
n = np.sum(boolIdx)
probCutoffs[cutoff]["NumberInteractions"] += n
probCutoffs[cutoff]["positiveInteractors"] += counts["pos"] if "pos" in counts.index else 0
probCutoffs[cutoff]["decoyInteractors"] += counts["decoy"] if "decoy" in counts.index else 0
probCutoffs[cutoff]["novelInteractions"] += counts["unknown/novel"] if "unknown/novel" in counts.index else 0
probCutoffs[cutoff]["interComplexInteractions"] += counts["inter"] if "inter" in counts.index else 0
boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
predX = np.append(predX,interactionClass.values.reshape(predX.shape[0],1),axis=1)
if predInteractions is None:
predInteractions = predX[boolIdx,:]
else:
predInteractions = np.append(predInteractions,predX[boolIdx,:], axis=0)
probData = pd.DataFrame().from_dict(probCutoffs, orient="index")
probData["FalseNegatives"] = probData["positiveInteractors"].iloc[0] - probData["positiveInteractors"]
probData["precision"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["interComplexInteractions"] + probData["decoyInteractors"])
probData["recall"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["FalseNegatives"])
probData["F1-measure"] = 2 * ((probData["precision"] * probData["recall"]) / (probData["precision"] + probData["recall"]))
probData["F-measure(b=2)"] = (1+2**2) * ((probData["precision"] * probData["recall"]) / (((2**2) * probData["precision"]) + probData["recall"]))
probData["F-measure(b=0.5)"] = (1+0.5**2)* ((probData["precision"] * probData["recall"]) / (((0.5**2) * probData["precision"]) + probData["recall"]))
#self.params["interactionProbabCutoff"] = float(probData.idxmax().loc["F1-measure"])
print("Info :: Interaction probability was set to: {} based on the F-metric using beta = 1.".format(self.params["interactionProbabCutoff"] ))
# boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
# if len(boolPredIdx.shape) > 1:
# boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
# else:
# boolIdx = boolPredIdx
probData.to_csv(os.path.join(folderToOutput,"classiferPerformanceMetrics_{}_addImp{}.txt".format(self.params["classifierClass"],self.params["addImpurity"])),sep="\t")
# print("Interactions > cutoff :", predInteractions.shape[0])
# print("Info :: Finding interactions in DB")
# boolDbMatch = np.isin(predInteractions[:,2],self.DB.df["E1E2"].values, assume_unique=True)
# print("Info :: Appending matches.")
# predInteractions = np.append(predInteractions,boolDbMatch.reshape(predInteractions.shape[0],1),axis=1)
d = pd.DataFrame(predInteractions, columns = dfColumns)
print("Info :: Number of interactions detected: {} at cut-off {}".format(d.index.size,self.params["interactionProbabCutoff"]))
boolDbMatch = d["In DB"] == "pos"
print("Info :: Annotate complexes to pred. interactions.")
d["ComplexID"], d["ComplexName"] = zip(*[self._attachComplexID(_bool,E1E2) for E1E2, _bool in zip(predInteractions[:,2], boolDbMatch)])
d = self._attachPeakIDtoEntries(d)
# boolIdx = d[pColumns[0]] > self.params["interactionProbabCutoff"]
# d = d.loc[boolIdx]
origSize = d.index.size
print("Info : Filter for at least {} times in predicted interactions".format(self.params["minimumPPsPerFeature"]))
if self.params["usePeakCentricFeatures"]:
eColumns = ["E1p","E2p"]
else:
eColumns = ["E1","E2"]
Es = pd.Series(d[eColumns].values.flatten())
EsCounted = Es.value_counts()
boolIdx = EsCounted >= self.params["minimumPPsPerFeature"]
duplicatedPPs = EsCounted.index[boolIdx]
d = d.loc[d[eColumns].isin(duplicatedPPs).all(axis="columns")]
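# Illustrative sketch of the filter above (hypothetical entries): an entry must occur in at
# least params["minimumPPsPerFeature"] predicted pairs to be kept, and a pair survives only
# if both partners pass, e.g. with a cutoff of 2:
#   pd.Series(["A","B","A","C"]).value_counts()  ->  A:2, B:1, C:1
#   -> only pairs whose partners are both in {"A"} remain.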
print("Removed interactions {}".format(origSize-d.index.size))
d.to_csv(pathToPrediction, sep="\t", index=False)
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = d.index.size
self.stats.loc[self.currentAnalysisName,"Classifier"] = self.params["classifierClass"]
return d
def _attachComplexID(self,_bool,E1E2):
""
if not _bool:
return ("","")
else:
df = self.DB.df[self.DB.df["E1E2"] == E1E2]
return (';'.join([str(x) for x in df["ComplexID"].tolist()]),
';'.join([str(x) for x in df["complexName"].tolist()]))
def _plotChunkSummary(self, data, fileName, folderToOutput):
"util fn"
data[self.params["metrices"]] = self.classifier._scaleFeatures(data[self.params["metrices"]].values)
fig, ax = plt.subplots()
XX = data.melt(id_vars = [x for x in data.columns if x not in self.params["metrices"]],value_vars=self.params["metrices"])
sns.boxplot(data = XX, ax=ax, y = "value", x = "variable", hue = "Class")
plt.savefig(os.path.join(folderToOutput,"{}.pdf".format(fileName)))
plt.close()
def _plotFeatureImportance(self,folderToOutput,*args,**kwargs):
"""
Creates a bar chart showing the estimated feature importances
Parameters
----------
folderToOutput : string
Path to folder to save the pdf. Will be created if it does not exist.
*args
Variable length argument list passed to matplotlib.bar.
**kwargs
Arbitrary keyword arguments passed to matplotlib.bar.
Returns
-------
None
"""
fImp = self.classifier.getFeatureImportance()
self._makeFolder(folderToOutput)
if fImp is not None:
#save as txt file
pd.DataFrame(fImp, columns= self.params["metrices"]).to_csv(os.path.join(folderToOutput,"featureImportance{}.txt".format(self.params["metrices"])), sep="\t")
#plot feature importance
fig, ax = plt.subplots()
xPos = np.arange(len(self.params["metrices"]))
ax.bar(x = xPos, height = np.mean(fImp,axis=0), *args,**kwargs)
ax.errorbar(x = xPos, y = np.mean(fImp,axis=0), yerr = np.std(fImp,axis=0))
ax.set_xticks(xPos)
ax.set_xticklabels(self.params["metrices"], rotation = 45)
plt.savefig(os.path.join(folderToOutput,"featureImportance.pdf"))
plt.close()
def _randomStr(self,n):
"""
Returns a random string (lower and upper case) of size n
Parameters
----------
n : int
Length of string
Returns
-------
random string of length n
"""
letters = string.ascii_lowercase + string.ascii_uppercase
return "".join(random.choice(letters) for i in range(n))
def _scoreComplexes(self, complexDf, complexMemberIds = "subunits(UniProt IDs)", beta=2.5):
""
entryPositiveComplex = [self.DB.assignComplexToProtein(str(e),complexMemberIds,"ComplexID") for e in complexDf.index]
complexDf.loc[:,"ComplexID"] = entryPositiveComplex
matchingResults = pd.DataFrame(columns = ["Entry","Cluster Labels","Complex ID", "NumberOfInteractionsInDB"])
clearedEntries = pd.Series([x.split("_")[0] for x in complexDf.index], index=complexDf.index)
for c,d in self.DB.indentifiedComplexes.items():
boolMatch = clearedEntries.isin(d["members"])
clusters = complexDf.loc[boolMatch,"Cluster Labels"].values.flatten()
nEntriesMatch = np.sum(boolMatch)
if nEntriesMatch > 1:
groundTruth = [c] * nEntriesMatch
matchingResults = matchingResults.append(pd.DataFrame().from_dict({"Entry":complexDf.index[boolMatch].values,
"Cluster Labels" : clusters,
"Complex ID": groundTruth,
"NumberOfInteractionsInDB" : [d["n"]] * nEntriesMatch}) ,ignore_index=True)
if not matchingResults.empty:
score = v_measure_score(matchingResults["Complex ID"],matchingResults["Cluster Labels"],beta = beta)
else:
score = np.nan
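# Note: sklearn's v_measure_score is the weighted harmonic mean of homogeneity (h) and
# completeness (c), V_beta = (1 + beta) * h * c / (beta * h + c); the beta = 2.5 default of
# _scoreComplexes therefore weights completeness (keeping known complex members in one
# cluster) more strongly than homogeneity.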
return complexDf , score, matchingResults
def _clusterInteractions(self, predInts, clusterMethod = "HDBSCAN", plotEmbedding = True, groupFiles = [], combineProbs = True, groupName = ""):
"""
Performs dimensional reduction and clustering of prediction distance matrix over a defined parameter grid.
Parameter
predInts - ndarray.
clusterMethod - string. Any string of ["HDBSCAN",]
plotEmbedding - bool. If true, embedding is plotted and save to pdf and txt file.
returns
None
"""
embedd = None
bestDf = None
topCorrFeatures = None
splitLabels = False
recordScore = OrderedDict()
saveEmbeddings = []
maxScore = np.inf
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metricesForPrediction"]]
cb = ComplexBuilder(method=clusterMethod)
print("\nPredict complexes")
if predInts is None:
print("No database provided. UMAP and clustering will be performed using defaultKwargs. (noDatabaseForPredictions = True)")
pathToFolder = self._makeFolder(self.params["pathToComb"],"complexIdentification_{}".format(self.params["addImpurity"]))
if not self.params["databaseHasComplexAnnotations"] and not self.params["noDatabaseForPredictions"] and predInts is not None:
print("Database does not contain complex annotations. Therefore standard UMAP settings are HDBSCAN settings are used for complex identification.")
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd = cb.fit(predInts,
metricColumns = metricColumns,
scaler = self.classifier._scaleFeatures,
umapKwargs= self.params["umapDefaultKwargs"])
elif self.params["noDistanceCalculationAndPrediction"] or self.params["noDatabaseForPredictions"]:
print("Info :: No database given for complex scoring. UMAP and HDBSCAN are performed to identify complexes.")
alignedEmbeddings = OrderedDict()
if len(self.Xs) > 1:
#correlate with each other
firstKey = list(self.Xs.keys())[0]
corrDfs = [self.Xs[firstKey].corrwith(df,axis=1,drop=True) for k,df in self.Xs.items() if k != firstKey]
mergedDf = pd.concat(corrDfs,join="inner",axis=1).mean(axis=1).sort_values(ascending=False)
topCorrFeatures = mergedDf.head(self.params["topNCorrFeaturesForUMAPAlignment"]).index
dataSets = [minMaxNorm(X.values,axis=1) for X in self.Xs.values()]
relations = []
for k,v in self.Xs.items():
if k != firstKey:
relationDict = dict([(self.Xs[prevKey].index.get_loc(idx),v.index.get_loc(idx)) for idx in topCorrFeatures])
relations.append(relationDict)
prevKey = k
print("Info :: Computing aligned UMAP using top correlated features.")
aligned_mapper = umap.aligned_umap.AlignedUMAP(**self.params["umapDefaultKwargs"]).fit(dataSets, relations=relations)
for n,umapE in enumerate(aligned_mapper.embeddings_):
key = list(self.Xs.keys())[n]
df = pd.DataFrame(umapE, index=self.Xs[key].index, columns = ["E({})_0".format(key),"E({})_1".format(key)])
alignedEmbeddings[key] = df.copy()
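# Illustrative note (hypothetical row positions): AlignedUMAP links consecutive embeddings
# through 'relations', a list of dicts mapping row positions of dataset i to row positions
# of dataset i+1 for the shared top-correlated anchor features, e.g.
#   relations = [{0: 4, 1: 0, 7: 2}]   # row 0 of run 1 corresponds to row 4 of run 2, ...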
for analysisName in self.params["analysisName"]:
if self.params["useRawDataForDimensionalReduction"]:
print("Info :: Using raw intensity data for dimensional reduction. Not calculated distances")
if self.params["scaleRawDataBeforeDimensionalReduction"]:
X = self.Xs[analysisName]
predInts = pd.DataFrame(minMaxNorm(X.values,axis=1), index=X.index, columns = ["scaled_({})_{}".format(analysisName,colName) for colName in X.columns]).dropna()
else:
predInts = self.Xs[analysisName]
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
preCompEmbedding = alignedEmbeddings[analysisName] if analysisName in alignedEmbeddings else None,
metricColumns = self.X.columns,
scaler = None,
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = False,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels")
df = df.set_index("Entry")
predInts.to_csv(os.path.join(pathToFolder,"predInts_{}.txt".format(analysisName)))
else:
predInts = self._loadAndFilterDistanceMatrix()
predInts[metricColumns] = minMaxNorm(predInts[metricColumns].values,axis=0)
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = metricColumns,
scaler = None,
poolMethod= "min",
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = True,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels({})".format(analysisName):clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels")
df = df.set_index("Entry")
if pooledDistances is not None:
pooledDistances.to_csv(os.path.join(pathToFolder,"PooledDistance_{}.txt".format(self.currentAnalysisName)),sep="\t")
squaredDf = pd.DataFrame(matrix,columns=intLabels,index=intLabels).loc[df.index,df.index]
squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
noNoiseIndex = df.index[df["Cluster Labels"] > 0]
squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
splitLabels = True
if embedd is not None and plotEmbedding:
#save embedding
dfEmbed = pd.DataFrame(embedd, columns = ["UMAP_{}_0{}".format(analysisName,n) for n in range(embedd.shape[1])])
dfEmbed["clusterLabels({})".format(analysisName)] = clusterLabels
dfEmbed["labels({})".format(analysisName)] = intLabels
if splitLabels:
dfEmbed["sLabels"] = dfEmbed["labels"].str.split("_",expand=True).values[:,0]
dfEmbed = dfEmbed.set_index("sLabels")
else:
dfEmbed = dfEmbed.set_index("labels({})".format(analysisName))
if self.params["scaleRawDataBeforeDimensionalReduction"] and self.params["useRawDataForDimensionalReduction"]:
dfEmbed = dfEmbed.join([self.Xs[self.currentAnalysisName],predInts],lsuffix="_",rsuffix="__")
else:
dfEmbed = dfEmbed.join(self.Xs[self.currentAnalysisName])
if topCorrFeatures is not None:
dfEmbed["FeatureForUMAPAlign"] = dfEmbed.index.isin(topCorrFeatures)
saveEmbeddings.append(dfEmbed)
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embedding_{}.txt".format(analysisName)),sep="\t")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd[:,0],embedd[:,1],s=12, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"E({}).pdf".format(analysisName)))
plt.close()
pd.concat(saveEmbeddings,axis=1).to_csv(os.path.join(pathToFolder,"concatEmbeddings.txt"),sep="\t")
else:
embedd = None
if len(groupFiles) > 0:
groupMetricColumns = ["Prob_0_({})".format(analysisName) for analysisName in groupFiles]
# print(groupMetricColumns)
usePeaks = self.params["usePeakCentricFeatures"]
print("Using peaks for clustering.")
print(groupMetricColumns)
if usePeaks:
# if len(groupFiles) > 0:
eColumns = ["E1p_({})".format(groupFiles[0]),"E2p_({})".format(groupFiles[0])]
predInts = predInts[groupMetricColumns + eColumns + ["E1E2"]]
else:
predInts = predInts[groupMetricColumns + ["E1","E2","E1E2"]]
eColumns = ["E1","E2"]
#
predInts.dropna(subset=groupMetricColumns,inplace=True,thresh=1)
for n, params in enumerate(list(ParameterGrid(CLUSTER_PARAMS[clusterMethod]))):
try:
cb.set_params(params)
if clusterMethod == "HDBSCAN":
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = groupMetricColumns,#,#[colName for colName in predInts.columns if "Prob_" in colName],
scaler = None,#self.classifier._scaleFeatures, #
inv = True, # after pooling by poolMethod, invert (1-X)
poolMethod="max",
preCompEmbedding = None,
entryColumns = eColumns
)
else:
clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts,
metricColumns = [colName for colName in predInts.columns if "Prob_" in colName],
scaler = self.classifier._scaleFeatures)
# clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts, metricColumns = probColumn, scaler = None, inv=True, poolMethod="mean")
except Exception as e:
print(e)
print("\nWarning :: There was an error performing clustering and dimensional reduction, using the params:\n" + str(params))
continue
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by=["Cluster Labels"])
if usePeaks:
df["E"] = df["Entry"].str.split("_",expand=True)[0]
df = df.set_index("E")
else:
df = df.set_index("Entry")
# clusteredComplexes = df[df["Cluster Labels"] != -1]
df, score, matchingResults = self._scoreComplexes(df)
# df = df.join(assignedIDs[["ComplexID"]])
if True:#maxScore > score: # write out all
df.to_csv(os.path.join( pathToFolder,"Complexes:{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
matchingResults.to_csv(os.path.join( pathToFolder,"ComplexPerEntry(ScoreCalc):{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
print("Info :: Current best params ... ")
# squaredDf = pd.DataFrame(matrix,columns=df.index,index=df.index).loc[df.index,df.index]
# squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted{}_{}.txt".format(groupName,n)),sep="\t")
# if usePeaks:
# noNoiseIndex = df["Entry"].loc[df["Cluster Labels"] > 0]
# else:
# noNoiseIndex = df.index[df["Cluster Labels"] > 0]
# squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}_{}.txt".format(groupName,n)),sep="\t")
maxScore = score
bestDf = df
self._plotComplexProfiles(bestDf, pathToFolder, str(n))
if embedd is not None and plotEmbedding:
#save embedding
umapColumnNames = ["UMAP_{}".format(n) for n in range(embedd.shape[1])]
dfEmbed = pd.DataFrame(embedd, columns = umapColumnNames)
embedd = dfEmbed[umapColumnNames]
dfEmbed["clusterLabels"] = clusterLabels
if usePeaks:
dfEmbed["Ep"] = intLabels
dfEmbed["Entry"] = [x.split("_")[0] for x in intLabels]
else:
dfEmbed["Entry"] = intLabels
dfEmbed = dfEmbed.set_index("Entry")
dfEmbed.loc[dfEmbed.index,"ComplexID"] = df["ComplexID"].loc[dfEmbed.index]
rawDataMerge = [self.Xs[analysisName] for analysisName in groupFiles]
if n == 0:
for sampleN,fileName in enumerate(groupFiles):
rawDataMerge[sampleN].columns = ["{}_({}):F{}".format(colName,fileName,sampleN) for colName in rawDataMerge[sampleN].columns]
dfEmbed = dfEmbed.join(other = rawDataMerge)
try:
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embeding_{}_{}.txt".format(n,groupName)),sep="\t")
except:
print("Saving umap embedding failed.")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd["UMAP_0"].values, embedd["UMAP_1"].values,s=50, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"UMAP_Embedding_{}_n{}.pdf".format(groupName,n)))
plt.close()
recordScore[n] = {"score":score,"params":params}
def _loadAndFilterDistanceMatrix(self):
"""
Writes 'highQualityInteractions(..).txt' to disk, containing only the
protein pairs that show the lowest distance metrices.
Parameters
----------
Returns
-------
None
"""
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + metricColumns
q = None
df = pd.DataFrame(columns = dfColumns)
filteredExisting = False
pathToFile = os.path.join(self.params["pathToComb"],"highQualityInteractions({}).txt".format(self.currentAnalysisName))
for X,nChunks in self._loadPairsForPrediction():
boolSelfIntIdx = X[:,0] == X[:,1]
X = X[boolSelfIntIdx == False]
if q is None:
df = df.append(pd.DataFrame(X, columns = dfColumns), ignore_index=True)
else:
if not filteredExisting:
#first reduce existing df
mask = df[metricColumns] < q#X[:,[n+4 for n in range(len(self.params["metrices"]))]] < q
df = df.loc[np.any(mask,axis=1)] #filtered
filteredExisting = True
toAttach = pd.DataFrame(X, columns = dfColumns)
mask = toAttach[metricColumns] < q
toAttach = toAttach.loc[np.any(mask,axis=1)]
df = df.append(toAttach, ignore_index=True)
if df.index.size > 50000 and q is None:
q = np.quantile(df[metricColumns].astype(float).values, q = 1-self.params["metricQuantileCutoff"], axis = 0)
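# Note: the cutoff q is estimated once, per metric column, from the first >50k collected
# pairs; np.quantile(..., q = 1 - metricQuantileCutoff, axis = 0) returns one threshold per
# column, and a pair is kept if it lies below the threshold in at least one metric.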
print("Info :: {} total pairwise protein-protein pairs at any distance below 10% quantile.".format(df.index.size))
df = self._attachPeakIDtoEntries(df)
df.to_csv(pathToFile, sep="\t")
print("Info :: Saving low distance interactions in result folder.")
return df
def _plotComplexProfiles(self,complexDf,outputFolder,name):
"""
Creates line charts as pdf for each profile.
Each chart has two panels: the top one shows the raw values and the bottom one
shows the profile normalized to its maximum (highest value scaled to one).
Enabled/Disabled by the parameter "plotComplexProfiles".
Parameters
----------
complexDf : pd.DataFrame
Data frame of clustered entries; the index holds the entries and a 'Cluster Labels' column is required.
outputFolder : string
Path to folder, will be created if it does not exist.
name : string
Name of complex.
Returns
-------
None
"""
if self.params["plotComplexProfiles"]:
toProfiles = self._makeFolder(outputFolder,"complexProfiles")
pathToFolder = self._makeFolder(toProfiles,str(name))
x = np.arange(0,len(self.X.columns))
for c in complexDf["Cluster Labels"].unique():
if c != -1:
fig, ax = plt.subplots(nrows=2,ncols=1)
entries = complexDf.loc[complexDf["Cluster Labels"] == c,:].index
lineColors = sns.color_palette("Blues",desat=0.8,n_colors=entries.size)
for n,e in enumerate(entries):
uniprotID = e.split("_")[0]
if uniprotID in self.Signals[self.currentAnalysisName]:
y = self.Signals[self.currentAnalysisName][uniprotID].Y
normY = y / np.nanmax(y)
ax[0].plot(x,y,linestyle="-",linewidth=1, label=e, color = lineColors[n])
ax[1].plot(x,normY,linestyle="-",linewidth=1, label=e, color = lineColors[n])
plt.legend(prop={'size': 5})
plt.savefig(os.path.join(pathToFolder,"{}_n{}.pdf".format(c,len(entries))))
plt.close()
def _attachPeakIDtoEntries(self,predInts):
""
if not "apexPeakDist" in predInts.columns:
return predInts
peakIds = [peakID.split("_") for peakID in predInts["apexPeakDist"]]
predInts["E1p"], predInts["E2p"] = zip(*[("{}_{}".format(E1,peakIds[n][0]),"{}_{}".format(E2,peakIds[n][1])) for n,(E1,E2) in enumerate(zip(predInts["E1"],predInts["E2"]))])
return predInts
def _makeFolder(self,*args):
""
pathToFolder = os.path.join(*args)
if not os.path.exists(pathToFolder):
os.mkdir(pathToFolder)
return pathToFolder
def _createTxtFile(self,pathToFile,headers):
""
with open(pathToFile,"w+") as f:
f.write("\t".join(headers))
def _makeTmpFolder(self, n = 0):
"""
Creates temporary folder.
Parameters
----------
n : int
Returns
-------
pathToTmp : str
absolute path to the tmp/analysis-name folder.
"""
if self.params["analysisName"] is None:
analysisName = self._randomStr(50)
elif isinstance(self.params["analysisName"],list) and n < len(self.params["analysisName"]):
analysisName = self.params["analysisName"][n]
else:
analysisName = str(self.params["analysisName"])
#check if results folder exists.
pathToTmp = os.path.join(".","results")
if not os.path.exists(pathToTmp):
os.mkdir(pathToTmp)
self.currentAnalysisName = analysisName
date = datetime.today().strftime('%Y-%m-%d')
self.params["Date of anaylsis"] = date
runName = self.params["runName"] if self.params["runName"] is not None else self._randomStr(3)
self.params["pathToComb"] = self._makeFolder(pathToTmp,"{}_n({})runs".format(runName,len(self.params["analysisName"])))
print("Info :: Folder created in which combined results will be saved: " + self.params["pathToComb"])
pathToTmpFolder = os.path.join(self.params["pathToComb"],analysisName)
if os.path.exists(pathToTmpFolder):
print("Info :: Path to results folder exsists")
if self.params["restartAnalysis"]:
print("Warning :: Argument restartAnalysis was set to True .. cleaning folder.")
#to do - shift to extra fn
for root, dirs, files in os.walk(pathToTmpFolder):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
else:
print("Info :: Will take files from there, if they exist")
return pathToTmpFolder
try:
self._makeFolder(pathToTmpFolder)
print("Info :: Result folder created -- ",analysisName)
self._makeFolder(pathToTmpFolder,"chunks")
print("Info :: Chunks folder created/checked")
self._makeFolder(pathToTmpFolder,"result")
print("Info :: Result folder created/checked")
self._makeFolder(pathToTmpFolder,"result","alignments")
print("Info :: Alignment folder created/checked")
self._makeFolder(pathToTmpFolder,"result","modelPlots")
print("Info :: Result/modelPlots folder created/checked. In this folder, all model plots will be saved here, if savePlots equals true, otherwise empty.")
# self._createTxtFile(pathToFile = os.path.join(pathToTmpFolder,"runTimes.txt"),headers = ["Date","Time","Step","Comment"])
return pathToTmpFolder
except OSError as e:
print(e)
raise OSError("Could not create result folder due to OS Error")
def _handleComptabFormat(self,X,filesToLoad):
"""
Extracts different samples from comptab format.
Parameters
----------
X : str
Path to folder where the comptab files are located
filesToLoad:
list of txt/tsv files present in the folder
Returns
-------
detectedDataFrames : list of pd.DataFrame
list of identified data frames from the comptab file
fileNames : list of str
Internal names <comptabfileName>:<sampleName>
"""
detectedDataFrames = []
fileNames = []
for fileName in filesToLoad:
comptFile = pd.read_csv(os.path.join(X,fileName), sep="\t", header=[0,1], index_col=0)
columnsToKeep = [colNameTuple for colNameTuple in comptFile.columns if "unique peptides" not in colNameTuple and "coverage" not in colNameTuple and "protein length" not in colNameTuple]
comptFile = comptFile[columnsToKeep]
#find unique sample names given in the first header
samples = np.unique([colNameTuple[0] for colNameTuple in comptFile.columns])
for sampleName in samples:
sampleColumns = [colNameTuple for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName]
dataFrame = pd.DataFrame(comptFile[sampleColumns].values,
columns = [colNameTuple[1] for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName])
dataFrame["Uniprot ID"] = comptFile.index
detectedDataFrames.append(dataFrame)
fileNames.append("{}:{}".format(fileName,sampleName))
return detectedDataFrames, fileNames
def _mergeDistancesForGroups(self):
""
def run(self,X, maxValueToOne = False):
"""
Runs the ComplexFinder Script.
Parameters
----------
X : str, list, pd.DataFrame
Returns
-------
pathToTmp : str
absolute path to the tmp/analysis-name folder.
"""
self.allSamplesFound = False
self.entriesInChunkLoaded = False
global entriesInChunks
if isinstance(X,list) and all(isinstance(x,pd.DataFrame) for x in X):
if self.params["compTabFormat"]:
raise TypeError("If 'compTabFormat' is True. X must be a path to a folder. Either set compTabFormat to False or provide a path.")
print("Multiple dataset detected - each one will be analysed separetely")
if self.params["analysisName"] is None or not isinstance(self.params["analysisName"],list) or len(self.params["analysisName"]) != len(X):
self.params["analysisName"] = [self._randomStr(10) for n in range(len(X))] #create random analysisNames
print("Info :: 'anylsisName' did not match X shape. Created random strings per dataframe.")
elif isinstance(X,str):
if os.path.exists(X):
loadFiles = [f for f in os.listdir(X) if f.endswith(".txt") or f.endswith(".tsv")]
if self.params["compTabFormat"]:
Xs, loadFiles = self._handleComptabFormat(X,loadFiles)
else:
Xs = [pd.read_csv(os.path.join(X,fileName), sep="\t") for fileName in loadFiles]
#filterId = pd.read_csv(os.path.join("filter","SPY.txt"),index_col=None)
#Xs = [X.loc[X["Protein.Group"].isin(filterId["MouseMito"].values)] for X in Xs]
self.params["analysisName"] = loadFiles
if maxValueToOne:
maxValues = pd.concat([x.max(axis=1) for x in Xs], axis=1).max(axis=1)
normValueDict = dict([(Xs[0][self.params["idColumn"]].values[n],maxValue) for n,maxValue in enumerate(maxValues.values)])
self.params["normValueDict"] = normValueDict
else:
raise ValueError("Provided path {} does not exist.".format(X))
elif isinstance(X,pd.DataFrame):
Xs = [X]
self.params["analysisName"] = [self._randomStr(10)]
else:
ValueError("X must be either a string, a list of pandas data frames or pandas data frame itself.")
self.params["pathToTmp"] = {}
statColumns = ["nInteractions ({})".format(self.params["interactionProbabCutoff"]),"nPositiveInteractions","OOB_Score","ROC_Curve_AUC","Metrices","Classifier","ClassifierParams"]
self.stats = pd.DataFrame(index = self.params["analysisName"],columns = statColumns)
self.params["rawData"] = {}
self.params["runTimes"] = {}
self.params["runTimes"]["StartTime"] = time.time()
for n,X in enumerate(Xs):
pathToTmpFolder = self._makeTmpFolder(n)
self.params["pathToTmp"][self.currentAnalysisName] = pathToTmpFolder
if n == 0:
pathToParams = os.path.join(self.params["pathToComb"],"params.json")
pd.DataFrame().from_dict(self.params,orient="index").sort_index().to_json(pathToParams,indent = 4, orient="columns")
print("Info :: Parameters saved to output folder.")
if os.path.exists(os.path.join(self.params["pathToComb"],"runTimes.txt")):
if not self.params["restartAnalysis"] and not self.params["recalculateDistance"] and not self.params["retrainClassifier"]:
print("Warning :: Analysis done. Aborting (detected by finding the file 'runTimes.txt'")
return
print("------------------------")
print("--"+self.currentAnalysisName+"--")
print("--------Started---------")
print("--Signal Processing &--")
print("------Peak Fitting------")
print("------------------------")
if pathToTmpFolder is not None:
#loading data
self._load(X)
#self._checkGroups()
self._findPeaks(self.params["n_jobs"])
self._collectRSquaredAndFitDetails()
self._saveSignals()
combinedPeakModel = self._combinePeakResults()
self._attachQuantificationDetails(combinedPeakModel)
endSignalTime = time.time()
self.params["runTimes"]["SignalFitting&Comparision"] = time.time() - self.params["runTimes"]["StartTime"]
if not self.params["justFitAndMatchPeaks"]:
print("Info :: Peak modeling done. Starting with distance calculations and predictions (if enabled)..")
self._createSignalChunks()
for n,X in enumerate(Xs):
if n < len(self.params["analysisName"]): #happnes if others than txt file are present
self.currentAnalysisName = self.params["analysisName"][n]
print(self.currentAnalysisName," :: Starting distance calculations.")
self._calculateDistance()
self._mergeDistancesForGroups()
self.params["runTimes"]["Distance Calculation"] = time.time() - endSignalTime
distEndTime = time.time()
self._loadReferenceDB()
for analysisName in self.params["analysisName"]:
self._addMetricesToDB(analysisName)
dataPrepEndTime = time.time()
self.params["runTimes"]["Database Preparation"] = dataPrepEndTime - distEndTime
self._trainPredictor(self.params["addImpurity"])
for analysisName in self.params["analysisName"]:
self.currentAnalysisName = analysisName
self._predictInteractions()
#
#save statistics
self.stats.to_csv(os.path.join(self.params["pathToComb"],"statistics.txt"),sep="\t")
#combine interactions
if not self.params["noDistanceCalculationAndPrediction"]:
if not self.params["noDatabaseForPredictions"]:
combinedInteractions = self._combineInteractionsAndClusters()
else:
print("Warning/Info :: noDistancenCalculationAndPrediction is True, skipping combineInteraction step.")
endTrainingTime = time.time()
self.params["runTimes"]["Classifier Training & Prediction"] = endTrainingTime - dataPrepEndTime
if not self.params["noDistanceCalculationAndPrediction"] and len(self.params["grouping"]) > 0 and not self.params["noDatabaseForPredictions"]:
for groupName,groupFileNames in self.params["grouping"].items():
if isinstance(groupFileNames,str):
groupFileNames = [groupFileNames]
self._clusterInteractions(combinedInteractions,groupFiles = groupFileNames,groupName = groupName)
else:
print("Info :: Cluster Interactions")
self._clusterInteractions(None)
self.params["runTimes"]["Interaction Clustering and Embedding"] = time.time() - endTrainingTime
print("Info :: Run Times :: ")
print(self.params["runTimes"])
pd.DataFrame().from_dict(self.params["runTimes"],orient="index").to_csv(os.path.join(self.params["pathToComb"],"runTimes.txt"),sep="\t")
print("Info :: Analysis done.")
def _combinePredictedInteractions(self, pathToComb):
"""
Combines predicted Interactions based on the output
files : predictedInteractions[..].txt of each run.
Parameters
----------
pathToComb : str, path to combined result folder.
Returns
-------
combResults : pd.DataFrame
combined data frame for each run. All metrices and predictions are provided.
"""
pathToInteractions = os.path.join(pathToComb,"combinedInteractions.txt")
if False and os.path.exists(pathToInteractions) and not self.params["retrainClassifier"]:
combResults = pd.read_csv(pathToInteractions,sep="\t")
combResults = self._filterCombinedInteractions(combResults)
print("Info :: Combined interactions found and loaded.")
return combResults
print("Info :: Combining interactions of runs.")
predictedInteractions = []
for analysisName in self.params["analysisName"]:
pathToResults = os.path.join(self.params["pathToTmp"][analysisName],"result")
pathToPrediction = os.path.join(pathToResults,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if os.path.exists(pathToPrediction):
df = pd.read_csv(pathToPrediction,sep="\t", low_memory=False).set_index(["E1E2","E1","E2"])
df = df.loc[df["Prob_0"] > self.params["interactionProbabCutoff"]]
predictedInteractions.append(df)
else:
raise ValueError("Warning :: PredictedInteractions not found. " + str(pathToPrediction))
for n,df in enumerate(predictedInteractions):
analysisName = self.params["analysisName"][n]
if n == 0:
combResults = df
combResults.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
combResults[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
else:
df.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
#columnNames = [colName for colName in df.columns if colName] # we have them already from n = 0
df[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
# combResults["validSignal({})".format(analysisName)] = df[["E1_({})".format(analysisName),"E2_({})".format(analysisName)]].apply(lambda x: all(e in self.Signals[analysisName] and self.Signals[analysisName][e].valid for e in x.values),axis=1)
combResults = combResults.join(df, how="outer")
combResults = combResults.reset_index()
for analysisName in self.params["analysisName"]:
combResults["validSignalFit({})".format(analysisName)] = combResults[["E1","E2"]].apply(lambda x: all(e in self.Signals[analysisName] and self.Signals[analysisName][e].valid for e in x.values),axis=1)
combResults["#Valid Signal Fit"] = combResults[["validSignalFit({})".format(analysisName) for analysisName in self.params["analysisName"]]].sum(axis=1)
detectedColumn = [analysisName for analysisName in self.params["analysisName"]]
#detected in grouping
for groupName,groupItems in self.params["grouping"].items():
if all(groupItem in combResults.columns for groupItem in groupItems):
boolIdx = combResults[groupItems] == "+"
if isinstance(boolIdx,pd.Series):
#grouping contains a single item (boolIdx is then a Series; always true, but reported for consistency)
combResults["Complete in {}".format(groupName)] = boolIdx
else:
combResults["Complete in {}".format(groupName)] = np.sum(boolIdx,axis=1) == len(groupItems)
boolIdx = combResults[detectedColumn] == "+"
combResults["# Detected in"] = np.sum(boolIdx,axis=1)
combResults.sort_values(by="# Detected in", ascending = False, inplace = True)
# combResults.loc[combResults["E1E2"].str.contains("A0A087WU95")].to_csv("BiasedSelection.txt",sep="\t")
combResults.to_csv(pathToInteractions,sep="\t",index=True)
combResults = self._filterCombinedInteractions(combResults)
return combResults
def _filterCombinedInteractions(self,combResults):
"""
Filters combined interactions.
Parameters
----------
combResults : pd.DataFrame. Combined interactions.
Returns
-------
combResults : pd.DataFrame
filteredCombResults
"""
interactionsInAllSamples = self.params["considerOnlyInteractionsPresentInAllRuns"]
if isinstance(interactionsInAllSamples,bool) and interactionsInAllSamples:
filteredCombResults = combResults.loc[combResults["# Detected in"] == len(self.params["analysisName"])]
elif isinstance(interactionsInAllSamples,int):
if interactionsInAllSamples > len(self.params["analysisName"]):
interactionsInAllSamples = len(self.params["analysisName"])
filteredCombResults = combResults.loc[combResults["# Detected in"] >= interactionsInAllSamples]
else:
#if no filtering is applied.
filteredCombResults = combResults
return filteredCombResults
def _combineInteractionsAndClusters(self):
""
pathToComb = self.params["pathToComb"]
combinedInteractions = self._combinePredictedInteractions(pathToComb)
return combinedInteractions
def _saveSignalFitStatistics(self):
"Save Fit Statistic to disk"
pathToTxt = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result","fitStatistic({}).txt".format(self.currentAnalysisName))
data = [{
"id":signal.ID,
"R2":signal.Rsquared,
"#Peaks":len(signal.modelledPeaks) if hasattr(signal,"modelledPeaks") else 0,
"valid":signal.valid,
"validModel":signal.validModel,
"validData":signal.validData
} for signal in self.Signals[self.currentAnalysisName].values()]
pd.DataFrame().from_dict(data).to_csv(pathToTxt,sep="\t")
def _checkAlignment(self,data):
""
data = data.dropna()
centerColumns = [colName for colName in data.columns if colName.startswith("auc")]
data[centerColumns].corr()
f = plt.figure()
ax = f.add_subplot(111)
ax.scatter(data[centerColumns[0]],data[centerColumns[1]])
plt.show()
def _alignProfiles(self,fittedPeaksData):
""
alignMethod = self.params["alignMethod"]
if len(fittedPeaksData) > 1 and alignMethod in alignModels and os.path.exists(self.params["pathToComb"]):
alignResults = OrderedDict([(analysisName,[]) for analysisName in self.params["analysisName"]])
fittedModels = dict()
removedDuplicates = [X.loc[~X.duplicated(subset=["Key"],keep=False)] for X in fittedPeaksData]
preparedData = []
for n,dataFrame in enumerate(removedDuplicates):
dataFrame.columns = ["{}_{}".format(colName,self.params["analysisName"][n]) if colName != "Key" else colName for colName in dataFrame.columns ]
dataFrame = dataFrame.set_index("Key")
preparedData.append(dataFrame)
#join data frames
joinedDataFrame = preparedData[0].join(preparedData[1:],how="outer")
if joinedDataFrame.index.size < 30:
print("Less than 30 data profiles with single peaks found. Aborting alignment")
return fittedPeaksData
#use linear regression or lowess
for comb in combinations(self.params["analysisName"],2):
c1, c2 = comb
columnHeaders = ["Center_{}".format(c1),"Center_{}".format(c2)]
data = joinedDataFrame.dropna(subset=columnHeaders)[columnHeaders]
absDiff = np.abs(data[columnHeaders[0]] - data[columnHeaders[1]])
pd.DataFrame(data).to_csv("alignedPeaks.txt",sep="\t")
boolIdx = absDiff > 5 #remove everything with a difference greater than 5.
data = data.loc[~boolIdx]
nRows = data.index.size
X, Y = data[[columnHeaders[0]]].values, data[[columnHeaders[1]]].values
model = alignModels["LinearRegression"](**alignModelsParams["LinearRegression"]).fit(X,Y)
lnSpace = np.linspace(np.min(data.values),np.max(data.values)).reshape(-1,1) #get min / max values
Yplot = model.predict(lnSpace)
#store R2
R2 = model.score(X,Y)
alignResults[c1].append(R2)
alignResults[c2].append(R2)
#save model
fittedModels[comb] = {"model":model,"R2":R2}
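# Illustrative note: for each pair of runs, centers of shared single-peak profiles are
# regressed against each other; with the fitted model, aligned centers would be obtained as
#   centersC2_pred = model.predict(centersC1.reshape(-1, 1))
# and the R2 values collected per run are used below to pick the reference run.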
#plot alignment
f = plt.figure()
ax = f.add_subplot(111)
ax.scatter(joinedDataFrame["Center_{}".format(c1)],joinedDataFrame["Center_{}".format(c2)])
ax.plot(lnSpace,Yplot)
plt.savefig(os.path.join(self.params["pathToComb"],"{}.pdf".format(comb)))
#ax.plot()
#save alignment
o = pd.DataFrame(lnSpace)
o["y"] = Yplot
o.to_csv("curve_{}.txt".format(alignMethod),sep="\t")
#find run with highest R2s - this run will be used to align all other
maxR2SumRun = max(alignResults, key=lambda key: sum(alignResults[key]))
print("The run that will be used as a reference (highest sum of R2 for all fits) is {}".format(maxR2SumRun))
diffs = pd.DataFrame()
#calculate difference to reference run
for analysisName in self.params["analysisName"]:
if analysisName != maxR2SumRun:
columnHeaders = ["Center_{}".format(maxR2SumRun),"Center_{}".format(analysisName)]
data = joinedDataFrame.dropna(subset=columnHeaders)[columnHeaders]
absDiff = data[columnHeaders[0]] - data[columnHeaders[1]]
diffs[analysisName] = absDiff
diffs["c({})".format(analysisName)] = data[columnHeaders[0]]
fig, ax = plt.subplots(len(self.params["analysisName"]))
for n,analysisName in enumerate(self.params["analysisName"]):
if analysisName in diffs.columns:
data = joinedDataFrame.dropna(subset=columnHeaders)[columnHeaders]
diffs = diffs.sort_values(by="c({})".format(analysisName))
boolIdx = np.abs(diffs[analysisName]) < 3
X = diffs.loc[boolIdx,["c({})".format(analysisName)]].values
Y = diffs.loc[boolIdx,[analysisName]].values
ax[n].plot(X,Y,color="darkgrey")
model = alignModels[alignMethod](**alignModelsParams[alignMethod]).fit(X,Y)
lnSpace = np.linspace(np.min(data.values),np.max(data.values)).reshape(-1,1) #get min / max values
Yplot = model.predict(lnSpace)
ax[n].plot(lnSpace,Yplot,color="red")
plt.savefig(os.path.join(self.params["pathToComb"],"centerDiff.pdf"))
return fittedPeaksData
def _combinePeakResults(self):
"""
Combine Peak results. For each run, each signal profile per feature
is represented by an ensemble of peaks. This function matches
the peaks using a maximal peak-center distance (1.8 by default) defined
by the parameter 'maxPeakCenterDifference'.
Peak height or area under the curve are compared using a t-test and/or an ANOVA.
Parameters
----------
Returns
-------
combResults : pd.DataFrame
filteredCombResults
"""
alignRuns = self.params["alignRuns"]
if self.params["peakModel"] == "SkewedGaussianModel":
columnsForPeakFit = ["Amplitude","Center", "Sigma", "Gamma", "fwhm", "height","auc","ID", "relAUC"]
else:
columnsForPeakFit = ["Amplitude","Center", "Sigma", "fwhm", "height","auc","ID", "relAUC"]
print("Info :: Combining peak results.")
print("Info :: Processing : "," : ".join(self.params["analysisName"]))
if len(self.params["analysisName"]) == 1:
print("Info :: Single run analysed. Will continue to create peak-centric output. No alignment performed.")
alignRuns = False
fittedPeaksData = []
suffixedColumns = []
for analysisName in self.params["analysisName"]:
suffixedColumns.extend(["{}_{}".format(colName,analysisName) for colName in columnsForPeakFit])
tmpFolder = self.params["pathToTmp"][analysisName]#os.path.join(".","tmp",analysisName)
resultsFolder = os.path.join(tmpFolder,"result")
fittedPeaks = os.path.join(resultsFolder,"fittedPeaks_{}.txt".format(analysisName))
if os.path.exists(fittedPeaks):
data = pd.read_csv(fittedPeaks,sep="\t")
if self.params["keepOnlySignalsValidInAllConditions"]:
validSignals = list(self.Signals[analysisName].keys())
boolIdx = data["Key"].isin(validSignals)
fittedPeaksData.append(data.loc[boolIdx])
else:
fittedPeaksData.append(data)
if alignRuns:
print("Info :: Aligning runs started.")
fittedPeaksData = self._alignProfiles(fittedPeaksData)
if len(fittedPeaksData) == 0:
raise ValueError("Fitted Peaks not found?")
uniqueKeys = np.unique(np.concatenate([x["Key"].unique().flatten() for x in fittedPeaksData]))
print("Info :: {} unique keys detected".format(uniqueKeys.size))
print("Info :: Combining peaks using max peak center diff of {}".format(self.params["maxPeakCenterDifference"]))
#combinedData = pd.DataFrame(columns=["Key","ID","PeakCenter"])
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResults.txt")
if not os.path.exists(txtOutput):
concatDataFrames = []
for uniqueKey in uniqueKeys:
boolIdxs = [x["Key"] == uniqueKey for x in fittedPeaksData]
filteredData = [fittedPeaksData[n].loc[boolIdx] for n,boolIdx in enumerate(boolIdxs)]
d = pd.DataFrame(columns=["Key"])
for n,df in enumerate(filteredData):
if df.empty:
continue
if n == 0:
df.columns = [colName if colName == "Key" else "{}_{}".format(colName,self.params["analysisName"][n]) for colName in df.columns.values.tolist()]
d = d.append(df)
else:
meanCenters = d[[colName for colName in d.columns if "Center" in colName]].mean(axis=1)
idx = meanCenters.index
if idx.size == 0:
df.columns = [colName if colName == "Key" else "{}_{}".format(colName,self.params["analysisName"][n]) for colName in df.columns.values.tolist()]
d = d.append(df)
continue
newIdx = []
for m,peakCenter in enumerate(df["Center"]):
peaksInRange = meanCenters.between(peakCenter-self.params["maxPeakCenterDifference"],peakCenter+self.params["maxPeakCenterDifference"])
if not np.any(peaksInRange):
newIdx.append(np.max(idx.values+1+m))
else:
newIdx.append(idx[peaksInRange].values[0])
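# Illustrative note: a peak is merged into an existing row if its center lies within
# maxPeakCenterDifference of that row's mean center, otherwise it opens a new row, e.g.
#   meanCenters = pd.Series([10.1, 24.0]); peakCenter = 9.0; tolerance = 1.8
#   meanCenters.between(9.0 - 1.8, 9.0 + 1.8) -> [True, False] -> reuse the first index.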
df.index = newIdx
df.columns = [colName if colName == "Key" else "{}_{}".format(colName,self.params["analysisName"][n]) for colName in df.columns.values.tolist()]
targetColumns = [colName for colName in df.columns if self.params["analysisName"][n] in colName]
d = pd.merge(d,df[targetColumns], how="outer",left_index=True,right_index=True)
|
|
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
df = pd.DataFrame(accounts_history)[['amount', 'account', 'date', 'nav']]
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df['account'] = '我的'
fig = go.Figure()
if len(df) > 0:
start_nav = float(df[df['date'] == df['date'].min()].nav)
df.loc[:, 'nav'] = df['nav'] / start_nav - 1.0
df.rename(columns={'nav': 'return'}, inplace=True)
df = df.drop(df[df['amount'] <= 0].index)[['account', 'date', 'return']]
start_date = df.date.min()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
name='我的',
mode='lines',
)
)
index_df = None
if index_history:
index_history = pd.DataFrame(index_history)
index_history['date'] = pd.to_datetime(index_history['date'])
if start_date is not None:
index_history = index_history[index_history['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
index_history = index_history[index_history['date'] <
|
pd.to_datetime(end_date)
|
pandas.to_datetime
|
import pandas as pd
data = {'Name':['Ashika', 'Tanu', 'Ashwin', 'Mohit', 'Sourabh'],
'Age': [24, 23, 22, 19, 10]}
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
symbols = ['^VIX','DIA','SPY','SPYG','SPXL','BFOCX','XLF','XSD','JKE','SCHG']
"""
key = "<KEY>"
def pull_prices(symbols,key,delay=True):
dat = pd.DataFrame()
for symbol in symbols:
URL = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol="+symbol+"&outputsize=full&apikey="+key+"&datatype=csv"
csv=pd.read_csv(URL)
csv['ticker'] = symbol
csv = csv.reindex(sorted(csv.columns), axis=1)
print(symbol)
csv=csv.rename(mapper={'close':'close_',
'open':'open_',
'timestamp':'timestamp_'},axis='columns')
csv.drop_duplicates(inplace=True)
if delay==True:
time.sleep(15)
dat=dat.append(csv)
return dat.reset_index(drop=True)
assets = pull_prices(symbols,key)
pd.set_option("display.max_columns",500)
assets
assets[assets['ticker']=='VIX'].values
"""
import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
symbols = ['^VIX','^DJI','^GSPC','DIA','SPY','SPYG','SPXL','BFOCX','XLF','XSD','JKE','SCHG']
dat = yf.download(tickers = " ".join(symbols),
period = "max",
interval = "1d",
group_by = 'ticker')
dat.columns
assets =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Created on Thu Jan 26 17:04:11 2017
Preprocess Luna datasets and create nodule masks (and/or blank subsets)
NOTE that:
1. we do NOT segment the lungs at all -- we will use the raw images for training (DO_NOT_SEGMENT = True)
2. No corrections are made to the nodule radius in relation to the thickness of the layers (radius = (ca[4])/2, simply)
@author: <NAME>, <EMAIL>
Some functions have been reused from the respective examples/kernels openly published at https://www.kaggle.com/arnavkj95/data-science-bowl-2017/ , as referenced within the file
"""
#%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.ndimage as ndimage
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
DO_NOT_SEGMENT = True #### major difference with the initial/Feb version
RESIZE_SPACING = [2,2,2]
### z, y, x (x & y MUST be the same)
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
MARKER_INTERNAL_THRESH = -400 # was -400; maybe use -320 ??
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True # NOT a good idea; no added value
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
fw = MARKER_FRAME_WIDTH # frame width (it looks like 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
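# Illustrative sketch (not from the original pipeline): exercise generate_markers()
# on a small synthetic "slice". The 64x64 array and HU values below are made up
# purely to show the internal/external/watershed marker outputs; call the helper
# manually if you want to inspect them.
def _demo_generate_markers():
    demo_slice = np.full((64, 64), 40, dtype=np.int16)   # soft-tissue-like background
    demo_slice[20:44, 20:44] = -800                      # air-like blob below MARKER_INTERNAL_THRESH
    internal, external, watershed_markers = generate_markers(demo_slice)
    print("internal px:", internal.sum(),
          "external px:", external.sum(),
          "marker values:", np.unique(watershed_markers))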
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 ## added for tests; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE: with no iterations (i.e. the default of 1) we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
#image = image_slices[70]
def seperate_lungs_cv2(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watershed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dynamically adjust the number of iterations; original was 8
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(bool) # fast
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaps/holes close to the border (otherwise the large structure would create a gap by the edge)
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynamically adjust; better, 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE: with no iterations (i.e. the default of 1) we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE: with no iterations (i.e. the default of 1) we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
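# Illustrative sketch (not from the original pipeline): thru_plane_position() only
# needs ImageOrientationPatient/ImagePositionPatient, so a stand-in object is enough
# to show the cross/dot-product geometry; the numbers below are made up. Call manually.
def _demo_thru_plane_position():
    from types import SimpleNamespace
    fake_dcm = SimpleNamespace(
        ImageOrientationPatient=[1, 0, 0, 0, 1, 0],   # axial slice: rows along x, columns along y
        ImagePositionPatient=[-200.0, -150.0, 42.5],  # origin of this slice in patient space
    )
    # normal = cross([1,0,0], [0,1,0]) = [0,0,1], so the result is the z component: 42.5
    print("thru-plane position:", thru_plane_position(fake_dcm))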
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
#image = scipy.ndimage.interpolation.zoom(image, real_resize_factor) # nor mode= "wrap"/xxx, nor cval=-1024 can ensure that the min and max values are unchanged .... # cval added
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
#image = scipy.ndimage.zoom(image, real_resize_factor, order=1) # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)
#image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest', order=1) # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)
return image, new_spacing
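# Illustrative sketch (not from the original pipeline): the rounding in resample()
# means the achieved spacing is only approximately the requested one. The shape and
# spacing numbers below are made up; call the helper manually to see the arithmetic.
def _demo_resample_arithmetic():
    spacing = np.array([2.5, 0.7, 0.7])              # hypothetical z, y, x spacing in mm
    shape = np.array([120, 512, 512])                # hypothetical scan shape
    new_spacing = np.array([1.0, 1.0, 1.0])
    resize_factor = spacing / new_spacing            # [2.5, 0.7, 0.7]
    new_shape = np.round(shape * resize_factor)      # [300., 358., 358.]
    real_resize_factor = new_shape / shape           # the factor actually passed to zoom()
    achieved_spacing = spacing / real_resize_factor  # ~[1.0, 1.0011, 1.0011], not exactly 1 mm
    print("zoom factor:", real_resize_factor, "achieved spacing:", achieved_spacing)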
def segment_one(image_slices):
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(bool)
l_outline = np.zeros(shape).astype(bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(bool)
l_marker_external = np.zeros(shape).astype(bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and may lead to incorrect results when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0
## have to reduce dx to 0 as for instance at least one image of the lungs stretches right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
crop_max_ratio_z = 0.6 # 0.8 is too big; make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
mask_volume = zlen*ylen*xlen /(mask_shape[0] * mask_shape[1] * mask_shape[2]) # bounding-box volume as a fraction of the full volume
mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
#maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
# mask OK< crop the image and mask
### full crop
#image = image[bb[0]:bb[3], bb[1]:bb[4], bb[2]:bb[5]]
#mask = mask[bb[0]:bb[3], bb[1]:bb[4], bb[2]:bb[5]]
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
# Mask too big, auto-correct
print("The following mask is likely too big, auto-reducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
useSummaryPlot = True
if useSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
return l_segmented, image
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
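# Illustrative sketch (not from the original pipeline): a round trip through the two
# helpers above with made-up origin/spacing. Note that world_2_voxel() takes an
# absolute value, so the round trip only holds when the world coordinate lies on the
# positive side of the origin (the usual case). Call the helper manually if needed.
def _demo_world_voxel_roundtrip():
    origin = np.array([-100.0, -200.0, -200.0])      # z, y, x origin in mm (hypothetical)
    spacing = np.array([2.0, 1.0, 1.0])              # z, y, x spacing in mm (hypothetical)
    world = np.array([-50.0, -100.0, -60.0])
    voxel = world_2_voxel(world, origin, spacing)    # -> [25., 100., 140.]
    print("voxel:", voxel, "back to world:", voxel_2_world(voxel, origin, spacing))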
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
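# Quick note on seq(): with more than one full step it behaves like an inclusive range,
# e.g. seq(-5, 5, 2) == [-5, -3, -1, 1, 3, 5], but it returns [] whenever
# int(round((stop - start)/step)) <= 1, so a nodule whose diameter is comparable to
# (or smaller than) the voxel spacing gets no offsets at all in draw_circles() below.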
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
radius = (ca[4])/2 # VERSION iseg_luna3 - DO NOT CORRECT the radius in ANY way ...!!
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
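# Illustrative sketch (not from the original pipeline): draw_circles() on a tiny
# synthetic volume with one made-up annotation row laid out like annotations.csv
# (seriesuid, coordX, coordY, coordZ, diameter_mm). Call the helper manually.
def _demo_draw_circles():
    image = np.zeros((20, 40, 40), dtype=np.int16)   # z, y, x voxel grid (hypothetical)
    origin = np.array([0.0, 0.0, 0.0])               # z, y, x world origin in mm
    spacing = np.array([2.0, 2.0, 2.0])              # 2 mm isotropic voxels
    cands = pd.DataFrame(
        [["demo-uid", 40.0, 40.0, 20.0, 10.0]],
        columns=["seriesuid", "coordX", "coordY", "coordZ", "diameter_mm"],
    )
    mask = draw_circles(image, cands, origin, spacing)
    print("nodule voxels set:", int(mask.sum()), "around voxel centre ~ (10, 20, 20)")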
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to the RESIZE_SPACING resolution (2x2x2 mm here) in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
luna_subset = 0 # initial
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
file = files[12] # rough empty set test - if file is empty this would fail; 12th - 3 nodules
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
print ("Luna annotations (head)")
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
print (cands)
def create_nodule_mask(imagePath, cands):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING # was [1, 1, 1]
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
useTestPlot = False
if useTestPlot:
plt.hist(img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_seg.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = img.shape[0] // 2
# Show some slice in the middle
plt.imshow(img[img_sel_i], cmap=plt.cm.gray)
plt.show()
img_sel_i = lung_seg.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_seg[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_seg_crop[img_sel_i], cmap='gray')
plt.show()
#create nodule mask
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
img_sel_i = 146 # 36
plt.imshow(lung_seg[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
w448 = int(448 // RESIZE_SPACING[1]) # we use 448 as it is insufficient for only 3 out of 1595 patients, given the pixel resolution:
#lung_img_448, lung_seg_448, nodule_mask_448 = np.zeros((lung_img.shape[0], w448, w448)), np.zeros((lung_seg.shape[0], w448, w448)), np.zeros((nodule_mask.shape[0], w448, w448))
lung_img_448 = np.full ((lung_img.shape[0], w448, w448), -2000, dtype=np.int16)
lung_seg_448 = np.full ((lung_seg.shape[0], w448, w448), -2000, dtype=np.int16)
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
lung_img = lung_img[:,y_min:y_max,:]
lung_seg = lung_seg[:,y_min:y_max,:]
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
lung_img = lung_img[:,:,x_min:x_max]
lung_seg = lung_seg[:,:,x_min:x_max]
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
### if new_origin is used check the impact of the above crop for instance for:
### path = "'../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.430109407146633213496148200410'
lung_img_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_img[z,:,:]
lung_seg_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_seg[z,:,:]
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
lung_img_448 = lung_img # equal dimensions, just copy all (no need to add the originals within a frame)
lung_seg_448 = lung_seg
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
if useTestPlot:
lung_img_448.shape
lung_seg_448.shape
#lung_seg_crop.shape
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(lung_img_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(lung_seg_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg_448[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
useSummaryPlot = True
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100: ", mask_sum_mean_x100)
# save images.
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1) # data removed from the second part on AWS
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
if DO_NOT_SEGMENT:
np.savez_compressed(path_segmented + '_lung', lung_seg_448)
else:
np.savez_compressed(path_segmented + '_lung_seg', lung_seg_448)
np.savez_compressed(path_segmented + '_nodule_mask', nodule_mask_448)
return
def find_lungs_range(y, noise):
n = len(y)
mid = n // 2
new_start = 0
for i in range(mid, 0, -1):
if y[i] < noise:
new_start = i
break
new_end = n
for i in range(mid, n, 1):
if y[i] < noise:
new_end = i
break
return new_start, new_end
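# Illustrative sketch (not from the original pipeline): find_lungs_range() walks
# outwards from the middle of a 1-D "lung pixels per row" profile until it drops
# below the noise level. The profile below is made up; call the helper manually.
def _demo_find_lungs_range():
    profile = np.array([0, 0, 1, 5, 9, 9, 8, 2, 0, 0])
    start, end = find_lungs_range(profile, noise=3)
    print("lungs span indices", start, "to", end)    # -> 2 to 7 for this profile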
def update_nodule_mask_or_blank (imagePath, cands, true_mask=True):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
# load the old one and copy across
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
if true_mask:
# nothing to update reload and copy over
mask_img_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
nodule_mask_448 = mask_img_z['arr_0']
print("Loading and saving _nodule_mask as _nodule_mask_wblanks for: ", path_segmented)
else:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
# loading of the image/images to sync with the update -- DOES NOT WORK
attempt_through_reloading = False ## this has failed
if attempt_through_reloading:
if DO_NOT_SEGMENT:
lung_img_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
lung_img_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
lung_img = lung_img_z['arr_0']
else:
## have to redo the calculations
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if not true_mask:
nodule_mask = -1 * nodule_mask # mark it as invalid to be zeroed later on (needed to get the blanks)
useTestPlot = False
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
#mask0 = np.load(''.join((path_segmented + '_module_mask' + '.npz')))
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
w448 = int(448 // RESIZE_SPACING[1]) # we use 448 as it is insufficient for only 3 out of 1595 patients, given the pixel resolution:
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
if useTestPlot:
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
useSummaryPlot = False
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
count_blanks = np.sum(nodule_mask_448 < 0)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
#lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
#f, ax = plt.subplots(1, 3, figsize=(15,5))
#ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
#ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
#ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100, blanks built-in: ", mask_sum_mean_x100, count_blanks)
np.savez_compressed(path_segmented + '_nodule_mask_wblanks', nodule_mask_448)
return
def create_nodule_mask_or_blank (imagePath, cands, true_mask=True):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING # was [1, 1, 1]
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
useTestPlot = False
if useTestPlot:
plt.hist(img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_seg.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = img.shape[0] // 2
# Show some slice in the middle
plt.imshow(img[img_sel_i], cmap=plt.cm.gray)
plt.show()
img_sel_i = lung_img.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_img[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_img[:, 4* lung_img.shape[1] // 6], cmap='gray')
plt.show()
HU_LUNGS_MIN = -900 # the algo is sensitive to this value -- keep it at -900 unless retested
HU_LUNGS_MAX = -400
jsteps = 10
for j in range(jsteps):
# Show some slice in the middle
img_sel_i = j * lung_img.shape[1] // jsteps
img_cut = lung_img[:, img_sel_i]
lix = (img_cut > HU_LUNGS_MIN) & (img_cut < HU_LUNGS_MAX)
lix_y = np.sum(lix, axis=1)
print ("Cut & ratio, lix_y (min, mean, max): ", j, j/jsteps, np.min(lix_y),np.mean(lix_y), np.max(lix_y) )
noise = 3 * np.min(lix_y)
noise = 0.05 * np.max(lix_y)
noise = max([3 * np.min(lix_y), 0.05 * np.max(lix_y)])
print ("Lungs range: ", find_lungs_range(lix_y, noise))
plt.imshow(img_cut, cmap='gray')
plt.show()
plt.imshow(lix, cmap='gray')
plt.show()
plt.plot (lix_y)
plt.show()
ymin = int(0.4 * lung_img.shape[1])
ymax = int(0.6 * lung_img.shape[1])
zmin_new = lung_img.shape[0] // 2
zmax_new = lung_img.shape[0] // 2
j = ymin
for j in range(ymin, ymax+1):
img_cut = lung_img[:, j]
img_cut_lungs = (img_cut > HU_LUNGS_MIN) & (img_cut < HU_LUNGS_MAX)
lungs_across = np.sum(img_cut_lungs, axis = 1)
#noise_bottom_some = np.mean(lungs_across[0:5])
noise = np.max([3*np.min(lungs_across), 0.05 * np.max(lungs_across)]) # experimental -- could fail if the scan has only the central part of the lungs and no borders at all -- CHECK
zmin, zmax = find_lungs_range(lungs_across, noise)
if zmin < zmin_new:
zmin_new = zmin
if zmax > zmax_new:
print ("j, zmax: ", j, zmax)
zmax_new = zmax
plt.imshow(img_cut, cmap='gray')
plt.show()
plt.imshow(img_cut_lungs, cmap='gray')
plt.show()
plt.plot (lungs_across)
plt.show()
HU_LUNGS_MIN = -950
HU_LUNGS_MAX = -400
ling = img #lung_img # lung_img # for our testing here
step = 400
for HU_LUNGS_MIN in range(-1000, 1000, step):
HU_LUNGS_MAX = HU_LUNGS_MIN + step
print ("HU_LUNGS_MIN, HU_LUNGS_MAX: ", HU_LUNGS_MIN, HU_LUNGS_MAX)
lix = (ling > HU_LUNGS_MIN) & (ling < HU_LUNGS_MAX)
lix_z = np.max(lix, axis=0).astype(np.int16)
plt.imshow(lix_z, cmap='gray')
plt.show()
HU_LUNGS_MIN = -900
HU_LUNGS_MAX = -500
ling = img #lung_img # lung_img # for our testing here
print ("HU_LUNGS_MIN, HU_LUNGS_MAX: ", HU_LUNGS_MIN, HU_LUNGS_MAX)
lix = (ling > HU_LUNGS_MIN) & (ling < HU_LUNGS_MAX)
lix_z = np.max(lix, axis=0).astype(np.int16)
lix_z_x = np.sum(lix_z, axis=0)
lix_z_y = np.sum(lix_z, axis=1)
plt.imshow(lix_z, cmap='gray')
plt.show()
plt.plot (lix_z_x)
plt.show()
plt.plot (lix_z_y)
plt.show()
for i in range(0,lung_img.shape[0], 10):
print("section: ", i)
plt.imshow(lung_img[i], cmap='gray')
plt.show()
img_sel_i = lung_seg.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_seg[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_seg_crop[img_sel_i], cmap='gray')
plt.show()
#create nodule mask
#cands.diameter_mm = 3.2
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if not true_mask:
nodule_mask = -1 * nodule_mask # mark it as invalid to be zeroed later on (needed to get the blanks)
#np.sum(nodule_mask)
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
img_sel_i = 146 # 36
plt.imshow(lung_seg[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
w448 = int(448 // RESIZE_SPACING[1]) # we use 448 as it is insufficient for only 3 out of 1595 patients, given the pixel resolution:
#lung_img_448, lung_seg_448, nodule_mask_448 = np.zeros((lung_img.shape[0], w448, w448)), np.zeros((lung_seg.shape[0], w448, w448)), np.zeros((nodule_mask.shape[0], w448, w448))
lung_img_448 = np.full ((lung_img.shape[0], w448, w448), -2000, dtype=np.int16)
lung_seg_448 = np.full ((lung_seg.shape[0], w448, w448), -2000, dtype=np.int16)
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
lung_img = lung_img[:,y_min:y_max,:]
lung_seg = lung_seg[:,y_min:y_max,:]
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
lung_img = lung_img[:,:,x_min:x_max]
lung_seg = lung_seg[:,:,x_min:x_max]
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
### if new_origin is used check the impact of the above crop for instance for:
### path = "'../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.430109407146633213496148200410'
lung_img_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_img[z,:,:]
lung_seg_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_seg[z,:,:]
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
lung_img_448 = lung_img # equal dimensions, just copy everything (no need to place the originals within a frame)
lung_seg_448 = lung_seg
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
#lung_seg_448_mean = np.mean(lung_seg_448, axis=0)
if useTestPlot:
lung_img_448.shape
lung_seg_448.shape
#lung_seg_crop.shape
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(lung_img_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(lung_seg_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg_448[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
useSummaryPlot = True
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100: ", mask_sum_mean_x100)
# save images.
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
#np.save(imageName + '_lung_img.npz', lung_img_448)
if DO_NOT_SEGMENT:
np.savez_compressed(path_segmented + '_lung', lung_seg_448)
else:
np.savez_compressed(path_segmented + '_lung_seg', lung_seg_448)
np.savez_compressed(path_segmented + '_nodule_mask', nodule_mask_448)
return
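# Illustrative helper (a minimal sketch, not part of the original pipeline): the 448-framing
# step above centres each slice inside a fixed square frame filled with -2000 (air).
# The same arithmetic, written with explicit start/stop indices, looks like this; it assumes
# the slice already fits inside the frame (the crop branches above handle the larger case).
def center_slice_in_frame(slice2d, frame_width=448, fill_value=-2000):
    framed = np.full((frame_width, frame_width), fill_value, dtype=slice2d.dtype)
    oy = (frame_width - slice2d.shape[0]) // 2  # vertical offset, as for upper_offset above
    ox = (frame_width - slice2d.shape[1]) // 2  # horizontal offset
    framed[oy:oy + slice2d.shape[0], ox:ox + slice2d.shape[1]] = slice2d
    return framed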
def create_nodule_mask_subset(luna_subset):
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
file = "../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.564534197011295112247542153557.mhd"
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#print (cands)
create_nodule_mask (imagePath, cands)
def create_nodule_mask_or_blank_subset(luna_subset, create_wblanks_mask_only=False):
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
print ("Luna subset(s) and candidates count: ", (luna_subset, len(candidates)))
candidates_false = pd.DataFrame(candidates[candidates["class"] == 0]) # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the true candidates
print ("False & true candidates: ", len(candidates_false), len(candidates_true))
#candidates.head()
use_all_blanks = True
if use_all_blanks:
aggregatez = 1 # from version unseg_blanks
else: # original aggregation
aggregatez = int(4 * RESIZE_SPACING[0]) # originally it was 4 -- for the blanks version do NOT aggregate by Z
candidates_false["coordZ_8"] = candidates_false["coordZ"].round(0) // aggregatez * aggregatez
file = "../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.564534197011295112247542153557.mhd"
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
if len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0:
if use_all_blanks:
print ("annotations and false candidates count (using all): ", len(cands), len(cfalse))
cfalse_sel_all = pd.DataFrame(cfalse)  # api: pandas.DataFrame

import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
    period_range("2007-1-1", periods=500, freq="X")  # api: pandas.period_range

"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division, print_function, unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import map, range
import argparse
from decimal import Decimal as D, getcontext
import math
import os
from string import Template
import io
import sys
import matplotlib
# http://matplotlib.org/users/pgf.html
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
"\\usepackage[utf8x]{inputenc}",
"\\usepackage[T1]{fontenc}",
# "\\usepackage{mathptmx}",
]
}
matplotlib.rcParams.update(pgf_with_pdflatex)
matplotlib.use("pgf")
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
pd.set_option('display.width', 200)
pd.set_option('display.float_format', '{:,.5g}'.format)
plt.style.use('ggplot') # ggplot-like style
# http://matplotlib.org/users/customizing.html
params = {
"font.family": "serif",
"font.serif": ["Palatino"], # use latex default serif font
# "font.sans-serif": ["DejaVu Sans"], # use a specific sans-serif font
"font.size": 10.0, # Set the global font-size
"axes.labelsize": "small", # Set the axes font-size
"legend.fontsize": "small", # Set the legend font-size
"xtick.labelsize": "small",
"ytick.labelsize": "small",
# "figure.figsize": [6.25, 4.75], # [inch^2] = 158,75 x 120.65 [mm^2]
# Set the total figure size:
# These dimensions seem to fit quite well, two figures fit on one page with some caption text
# \textwidth is 160mm
"figure.figsize": [6.0, 3.5], # [inch^2] = 158,75 x 101,6 [mm^2]
"figure.subplot.bottom": 0.11,
}
matplotlib.rcParams.update(params)
getcontext().prec = 5 # Set precision for Decimal
getcontext().capitals = 0 # No capital "E" ("e" instead)
DROP_DUPLICATES = True
def PathType(path_str):
"""
Determine if the given string is an existing path in the file system.
:param path_str:
:return:
"""
orig = path_str
path_str = os.path.expanduser(path_str) # Expand user path if necessary
path_str = os.path.abspath(path_str)
if os.path.exists(path_str):
return path_str
else:
raise argparse.ArgumentError(None, '"{}" is not a valid path'.format(orig))
def latex_table(content, caption, short_caption, label, label_prefix='tab:'):
"""
Return a LaTeX table-environment.
:param content: A valid LaTeX tabular-environment
:param caption: A caption string
:param short_caption: A short caption string
:param label: A valid label string
:param label_prefix: A label prefix
:return: LaTeX table-environment string
"""
LATEX_TABLE_TPL = """
\\begin{table}[htb]
\centering
{
\small
$content
}
\caption[$short_caption]{$caption}\label{$label}
\end{table}
"""
tpl = Template(LATEX_TABLE_TPL)
return tpl.substitute(content=content.replace('%', '\%'),
caption=caption,
short_caption=short_caption,
label='{}{}'.format(label_prefix, label))
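# Minimal usage sketch for latex_table (the DataFrame content below is made up for illustration):
def _demo_latex_table():
    df = pd.DataFrame({'n': [10, 20], 'm': [15, 40]})
    return latex_table(df.to_latex(index=False),
                       caption='Example graphs',
                       short_caption='Example',
                       label='example_graphs')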
def transform_stats_df(df):
if DROP_DUPLICATES:
df.drop_duplicates(['Graph Name'], inplace=True)
df = df[['Graph Name', 'Type']] # Create a new DF with only the relevant columns
return df
def transform_analysis_df(df):
df['filename'] = df['name'] # Duplicate row: 'name' contains <graphname>.<extension>
df['file_extension'] = df['name'].apply(lambda s: s.split('.')[-1]) # Create row with file extension only
df['name'] = df['name'].apply(lambda s: '.'.join(s.split('.')[:-1])) # Remove file extension from name
df['name'] = df['name'].apply(lambda s: s.replace('_', '-')) # Underscore in filename -> minus in graph name
if DROP_DUPLICATES:
df.drop_duplicates(['name'], inplace=True)
# Compute the automorphism group size using Decimal
df.rename(columns={'aut_group_size': 'aut_group_size_mult'}, inplace=True)
df['aut_group_size'] = [D(row['aut_group_size_mult']) * D(10) ** D(row['aut_group_size_exp'])
for _, row in df.iterrows()]
# Set the numpy nan instead of decimal NaN
df['aut_group_size'] = df['aut_group_size'].apply(lambda v: np.nan if v.is_nan() else v)
return df
def transform_merged_df(df):
df.drop(['Graph Name'], axis=1, inplace=True) # Remove the duplicate name column
# Reorder columns
df = df[['name', 'Type', 'n', 'm', 'density', 'modularity', 'aut_group_size',
# 'aut_group_size_mult', 'aut_group_size_exp',
'num_generators', 'num_orbits']]
df = df.sort_values(['name']) # Sort rows by name
df.index = list(range(len(df))) # Set a new index after the sorting
def rename_rt_graphs(entry):
if entry == 'retweet_graphs':
return 'rt'
else:
return entry
df['Type'] = df['Type'].apply(rename_rt_graphs)
df['redundancy_m'] = (df['num_orbits'] - 1) / df['n'] # Add the 'network redundancy'
df['redundancy'] = 1 - (df['num_orbits'] - 1) / (df['n'] - 1) # Add the normalized 'network redundancy'
# Drop within-class duplicates
# Use only for simplified analysis including duplicates (used for scatter of r_G' and Type)
# df.drop_duplicates(['Type', 'n', 'm', 'num_orbits', 'num_generators', 'aut_group_size'], inplace=True)
return df
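# Numeric sketch of the normalized network redundancy defined above,
# r_G' = 1 - (num_orbits - 1) / (n - 1): an asymmetric graph (num_orbits == n) gives 0,
# a vertex-transitive graph (num_orbits == 1) gives 1. The values below are illustrative only.
def _demo_redundancy(n=10, num_orbits=4):
    return 1 - (num_orbits - 1) / (n - 1)  # -> 2/3 for the defaults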
def print_graph_stats_statistics(df_s):
type_names = {'bhoslib': 'BHOSLIB', # Benchmarks with Hidden Optimum Solutions for Graph Problems
'bio': 'Biological Networks',
'bn': 'Brain Networks',
'ca': 'Collaboration Networks',
'chem': 'Cheminformatics',
'dimacs': 'DIMACS', # Center for Discrete Mathematics and Theoretical Computer Science
'dimacs10': 'DIMACS10',
'dynamic': 'Dynamic Networks',
'eco': 'Ecology Networks',
'ia': 'Interaction Networks',
'inf': 'Infrastructure Networks',
'massive': 'Massive Network Data',
'misc': 'Miscellaneous Networks',
'rec': 'Recommendation Networks',
'retweet_graphs': 'Retweet Networks',
'rt': 'Retweet Networks',
'sc': 'Scientific Computing',
'soc': 'Social Networks',
'socfb': 'Facebook Networks',
'tech': 'Technological Networks',
'tscc': 'Temporal Reachability Networks',
'web': 'Web Graphs',
}
# Flatten inconsistency: retweet networks are sometimes typed 'rt', sometimes 'retweet_graphs'
def rename_rt_graphs(entry):
if entry == 'retweet_graphs':
return 'rt'
else:
return entry
df_s['Type'] = df_s['Type'].apply(rename_rt_graphs)
gb_type = df_s.groupby(['Type'])
records = {'sum': {'Short Type Name': '', 'Count': len(df_s), 'Type Name': '$\sum$'}}
for name, group_df in gb_type:
records[name] = {'Short Type Name': name, 'Count': len(group_df), 'Type Name': type_names.get(name, 'NA')}
df_nodup = df_s.drop_duplicates(['Graph Name'])
records['sum']['Count (w/o duplicates)'] = len(df_nodup)
gb_type = df_nodup.groupby(['Type'])
for name, group_df in gb_type:
records[name]['Count (w/o duplicates)'] = len(group_df)
df = pd.DataFrame.from_records(records.values())
df = df[['Type Name', 'Short Type Name', 'Count', 'Count (w/o duplicates)']]
# Set the 'double' name for retweet graphs
df.loc[df[df['Short Type Name'] == 'rt'].index, 'Short Type Name'] = 'rt/retweet\_networks'
df = df.sort_values(['Type Name'])
df.index = list(range(len(df))) # Set a new index after the sorting
print(latex_table(df.to_latex(escape=False, index=False),
'Number of data sets for different network types on \\texttt{networkrepository.com}. '
'Duplicates were dropped by graph name',
'Data set overview for \\texttt{networkrepository.com}',
'networkrepository.com_statistics'))
def _decimal_statistics(series):
# Custom implementation to compute the values for pd.DataFrame.describe() for Decimals
cleaned_series = series.dropna()
decimal_list = sorted([D(v) for v in cleaned_series])
count = len(decimal_list)
if count == 0:
return 0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
sum_ = sum(decimal_list)
mean = sum_ / count
std = (sum((d - mean)**2 for d in decimal_list) / (count - 1)).sqrt() if count > 1 else np.nan
min_ = min(decimal_list)
max_ = max(decimal_list)
assert min_ == decimal_list[0]
assert max_ == decimal_list[-1]
def quantile(p, l):
n = len(l)
if (n * p) % 2 == 0:
idx = int(n * p) - 1 # 0-indexed!
return (l[idx] + l[idx+1]) / 2
else:
idx = int(math.floor(n * p + 1)) - 1 # 0-indexed!
return l[idx]
p25 = quantile(0.25, decimal_list)
p50 = quantile(0.5, decimal_list)
p75 = quantile(0.75, decimal_list)
return count, mean, std, min_, p25, p50, p75, max_
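# Minimal usage sketch for _decimal_statistics (the values are made up); the series entries
# only need to be convertible to Decimal:
def _demo_decimal_statistics():
    series = pd.Series(['1', '2', '3', '4'])
    # returns (count, mean, std, min, p25, p50, p75, max), with Decimal values where defined
    return _decimal_statistics(series)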
def print_total_analysis_statistics(df):
# Remove all DIMACS10 graphs as they are sure duplicates!
# df = df[df['Type'] != 'dimacs10']
duplicates = df[df.duplicated(['name'])]['name']
# df_total = df.drop_duplicates(['name'])
df_total = df
# df_total = df[['name', 'Type', 'n', 'm', 'density', 'modularity', 'aut_group_size',
# 'num_generators', 'num_orbits']]
df_stat = df_total.describe() # Compute standard statistics (count, mean, std, quartiles, min, max)
aut_grp_size_stat = _decimal_statistics(df_total['aut_group_size'])
df_stat['aut_group_size'] = aut_grp_size_stat
# Drop the two columns, as the statistics are not meaningful for those
df_stat.drop(['num_orbits', 'num_generators'], axis=1, inplace=True)
# Rename the columns for LaTeX export
df_stat.rename(columns={'n': '$n$', 'm': '$m$', 'density': r'$\rho$', 'modularity': '$Q$',
'aut_group_size': '$|Aut(G)|$', 'redundancy': '$r_G\'$'}, inplace=True)
# df_stat.apply(lambda v: round(v, 10))
print('%% Statistics for the complete data (size={})'.format(len(df_total)))
print('%% (Duplicates: {})'.format(list(duplicates)))
counts = {'analyzed': len(df_total) - len(df_total[df_total['aut_group_size'].isnull()]),
'asymmetric': len(df_total[df_total['n'] == df_total['num_orbits']])}
short_caption = "Analysis statistics for \\texttt{networkrepository.com} data sets"
caption = '{short_caption}: ${asymmetric}$ of the ${analyzed}$ graphs that ' \
'were analyzed for symmetry are asymmetric'.format(short_caption=short_caption, **counts)
print(latex_table(df_stat.to_latex(na_rep='nan', escape=False), caption, short_caption, 'networkrepos_total'))
# print latex_table(df_stat.to_latex(escape=False), caption, short_caption, 'networkrepos_total')
print('')
def print_group_analysis_statistics(group_name, df_group):
duplicates = df_group[df_group.duplicated(['name'])]['name']
group_df = df_group.drop_duplicates(['name'])
df_stat = group_df.describe()
df_stat.drop(['num_orbits', 'num_generators'], axis=1, inplace=True)
aut_grp_size_stat = _decimal_statistics(df_group['aut_group_size'])
df_stat['aut_group_size'] = aut_grp_size_stat
df_stat.rename(columns={'n': '$n$', 'm': '$m$', 'density': r'$\rho$', 'modularity': '$Q$',
'aut_group_size': '$|Aut(G)|$', 'redundancy': '$r_G\'$'}, inplace=True)
print('%% Statistics for type "{}" (group-size={})'.format(group_name, len(group_df)))
print('%% (Duplicates: {})'.format(list(duplicates)))
counts = {'analyzed': len(group_df) - len(group_df[group_df['aut_group_size'].isnull()]),
'asymmetric': len(group_df[group_df['n'] == group_df['num_orbits']])}
short_caption = "Analysis statistics for category ``%s'' on \\texttt{networkrepository.com}" % group_name
caption = '{short_caption}: ${asymmetric}$ of the ${analyzed}$ graphs that ' \
'were analyzed for symmetry are asymmetric'.format(short_caption=short_caption, **counts)
print(latex_table(df_stat.to_latex(na_rep='nan', escape=False), caption, short_caption,
'networkrepos_{}'.format(group_name)))
print('')
def create_scatterplot(df, attr1, attr2, xlabel=None, ylabel=None, xlim=None, ylim=None, c=None, cmap=None):
if not xlabel:
xlabel = attr1
if not xlabel:
ylabel = attr2
fig = plt.figure()
ax = plt.subplot('111')
# ax.scatter(df['density'], df['modularity'], c=df['aut_group_size'].apply(lambda x: x == 1),
# s=20, marker='.', linewidth=0)
# ax.scatter(df['density'], df['modularity'], c=df['Type'].apply(hash), s=20, marker='.', linewidth=0)
paths = ax.scatter(df[attr1], df[attr2], c=c, s=20, marker='.', linewidth=0, cmap=cmap)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
return fig, ax
def create_scatterplots(df, args):
# # Complete scatter matrix
# # axes = scatter_matrix(scatter_df[['n', 'm', 'density', 'modularity', 'redundancy']])
# axes = scatter_matrix(df[['density', 'modularity', 'redundancy']], c=df['Type'].apply(hash))
# # for x, y in ((x, y) for x in xrange(0, 5) for y in xrange(2, 5)):
# # if x == y:
# # continue
# # axes[x, y].set_xlim((-0.1, 1.1))
# # for x, y in ((x, y) for x in xrange(2, 5) for y in xrange(0, 5)):
# # if x == y:
# # continue
# # axes[x, y].set_ylim((-0.1, 1.1))
# plt.savefig(os.path.join(args.target_dir, 'networkrepository_scatter_complete.pgf'))
# plt.close()
# *** Modularity and redundancy ***
lim = (-.02, 1.02)
fig, ax = create_scatterplot(df, 'modularity', 'redundancy', c=df['Type'].apply(hash),
xlabel='$Q$', ylabel='$r_G\'$', xlim=lim, ylim=lim)
ax.legend()
fig.tight_layout()
fig.savefig(os.path.join(args.target_dir, 'networkrepository_scatter_Q_rG.pgf'))
plt.close()
# # |Aut(G)| and redundancy
# lim = (-.02, 1.02)
# df['aut_g_size_exp'] = df['aut_group_size_exp'] + df['aut_group_size_mult'].apply(math.log10).apply(math.floor)
# fig, ax = create_scatterplot(df, 'aut_g_size_exp', 'redundancy', c=df['Type'].apply(hash),
# xlabel='$b$', ylabel='$r_G\'$',
# xlim=(0, df['aut_g_size_exp'].max()), ylim=lim)
#
# ax.set_xscale('symlog')
# ax.legend()
# fig.tight_layout()
#
# fig.savefig(os.path.join(args.target_dir, 'networkrepository_scatter_AutG_rG.pgf'))
# plt.close()
# *** Modularity and density ***
lim = (-.02, 1.02)
fig, ax = create_scatterplot(df, 'modularity', 'density', # c=df['Type'].apply(hash),
xlabel='$Q$', ylabel='$\\rho$', xlim=lim, ylim=lim)
# ax.legend()
fig.tight_layout()
ax.plot([0, 1], [1, 0],
linestyle='-.',
color=list(plt.rcParams['axes.prop_cycle'])[1]['color'],
alpha=0.7,
# label='${0} = {1}$'.format(COL_NAMES['avg_cluster_size'][2:-2], COL_NAMES['max_group_support'][2:-2])
)
fig.savefig(os.path.join(args.target_dir, 'networkrepository_scatter_Q_rho.pgf'))
plt.close()
# *** Redundancy and Type ***
gb_type = df.groupby(['Type'])
type_dict = {name: idx for idx, (name, _) in enumerate(sorted(gb_type, key=lambda e: e[0]))}
size_dict = {name: len(group) for name, group in gb_type}
sym_size_dict = {name: len(group[group['n'] > group['num_orbits']]) for name, group in gb_type}
fig = plt.figure()
ax = plt.subplot('111')
paths = ax.scatter(df['redundancy'], df['Type'].apply(lambda t: -type_dict[t]),
s=15, marker='.', linewidth=0, c='b')
ax.set_xlabel("$r_G'$")
ax.set_ylabel("Type")
ax.set_yticks(sorted([-x for x in type_dict.values()]))
labels = [u"{} ({}/{})".format(name, size_dict[name], sym_size_dict[name])
for name in sorted(type_dict.keys(), key=lambda l: -type_dict[l])]
ax.set_yticklabels(labels)
ax.set_xlim(-.02, 1.02)
ax.set_ylim(top=.5, # Add some space add the top
bottom=-(len(type_dict) - .5) # number of entries from 0 to -len(type_dict) + 1 => remove some space
)
fig.tight_layout()
fig.savefig(os.path.join(args.target_dir, 'networkrepository_scatter_rG_type.pgf'))
plt.close()
def create_histogram_logx(df, attr, bins, latex=True):
attr_output = attr if not latex else "${}$".format(attr)
xlim = (0, bins[-1] + bins[-2])
fig = plt.figure()
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)
ax1.set_xscale('symlog') # Allows negative values => we only have positive ones but want a real lower bound of 0
# Create a histogram plot for the complete data
plt.hist(df[~df['num_orbits'].isnull()][attr], bins=bins, log=True, label="{} for all graphs".format(attr_output))
# Add the histogram plot for symmetric graphs
plt.hist(df[(df['n'] > df['num_orbits']) & ~df['num_orbits'].isnull()][attr], bins=bins, log=True,
label="{} for symmetric graphs".format(attr_output), hatch='//')
ax1.set_xlabel(attr_output)
ax1.set_ylabel("Frequency")
ax1.set_xlim(*xlim) # Set limits, on the right, add some small space
ax1.legend(loc=0)
ax1.set_yscale('linear') # "Reset" y axis to linear scale
ax2 = plt.subplot2grid((4, 1), (2, 0))
ax2.set_xscale('symlog')
plt.boxplot(df[~df['num_orbits'].isnull()][attr], vert=False, widths=.8)
ax2.yaxis.set_ticklabels([])
ax2.set_xlim(*xlim)
ax2.set_ylim(.4, 1.6)
ax2.set_xlabel("Box-plot for all graphs")
ax3 = plt.subplot2grid((4, 1), (3, 0))
ax3.set_xscale('symlog')
df2 = df[(df['n'] > df['num_orbits']) & ~df['num_orbits'].isnull()]
df2.index = list(range(len(df2)))
plt.boxplot(df2[attr], vert=False, widths=.8)
ax3.yaxis.set_ticklabels([])
ax3.set_xlim(*xlim)
ax3.set_ylim(.4, 1.6)
ax3.set_xlabel("Box-plot for symmetric graphs")
fig.tight_layout()
return fig, (ax1, ax2, ax3)
def create_histogram_and_boxplot(df, attr, num_bins=20, rng=(0, 1), margin=0.02, attr_name=None, xlabel=None):
if not attr_name:
attr_name = attr
if not xlabel:
xlabel = attr_name
bins = [i / num_bins * (rng[1] - rng[0]) for i in range(num_bins + 1)]
width = rng[1] - rng[0]
xlim = (rng[0] - margin*width, rng[1] + margin*width)
fig = plt.figure()
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)
# Create a histogram plot for the complete data
plt.hist(df[~df['num_orbits'].isnull()][attr], bins=bins, label="{} for all graphs".format(attr_name))
# Add the histogram plot for symmetric graphs
plt.hist(df[(df['n'] > df['num_orbits']) & ~df['num_orbits'].isnull()][attr], bins=bins,
label="{} for symmetric graphs".format(attr_name), hatch='//')
ax1.set_xlabel(xlabel)
ax1.set_ylabel("Frequency")
ax1.set_xlim(*xlim) # Add space to the left and right
ax1.legend(loc=0)
ax1.tick_params(labelright=True)
ax2 = plt.subplot2grid((4, 1), (2, 0))
plt.boxplot(df[~df['num_orbits'].isnull()][attr], vert=False, widths=.8)
ax2.yaxis.set_ticklabels([])
ax2.set_xlim(*xlim)
ax2.set_ylim(.4, 1.6)
ax2.set_xlabel("Box-plot for all graphs")
ax3 = plt.subplot2grid((4, 1), (3, 0))
df2 = df[(df['n'] > df['num_orbits']) & ~df['num_orbits'].isnull()]
df2.index = list(range(len(df2)))
plt.boxplot(df2[attr], vert=False, widths=.8)
ax3.yaxis.set_ticklabels([])
ax3.set_xlim(*xlim)
ax3.set_ylim(.4, 1.6)
ax3.set_xlabel("Box-plot for symmetric graphs")
fig.tight_layout()
return fig, (ax1, ax2, ax3)
def create_histograms(df, args):
bins = [0] + [10 ** i for i in range(0, 8)]
# *** Histograms for n and m ***
# axes = create_histogram_logx(df, 'n', bins=bins)
fig, axes = create_histogram_logx(df, 'n', bins=bins)
plt.tick_params(labelright=True)
plt.savefig(os.path.join(args.target_dir, 'networkrepository_hist_n.pgf'))
plt.close()
bins = [0] + [10 ** i for i in range(0, 9)]
# axes = create_histogram_logx(df, 'm', bins=bins)
fig, axes = create_histogram_logx(df, 'm', bins=bins)
plt.tick_params(labelright=True)
plt.savefig(os.path.join(args.target_dir, 'networkrepository_hist_m.pgf'))
plt.close()
# *** Histogram for Modularity and density ***
fig, axes = create_histogram_and_boxplot(df, 'modularity', attr_name='Modularity $Q$', xlabel='$Q$')
plt.savefig(os.path.join(args.target_dir, 'networkrepository_hist_modularity.pgf'))
plt.close()
fig, axes = create_histogram_and_boxplot(df, 'density', attr_name='Density $\\rho$', xlabel='$\\rho$')
plt.savefig(os.path.join(args.target_dir, 'networkrepository_hist_density.pgf'))
plt.close()
# *** Histogram for redundancy ***
rng = (0, 1)
num_bins = 20
bins = [i / num_bins * (rng[1] - rng[0]) for i in range(num_bins+1)]
fig = plt.figure()
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
plt.hist(df[~df['redundancy'].isnull()]['redundancy'], bins=bins, label='Network redundancy $r_G\'$')
# plt.hist(df[~df['redundancy_m'].isnull()]['redundancy_m'], bins=bins, label='Network redundancy $r\'_G$')
ax1.set_xlim(-.02, 1.02) # Add space to the left and right
# Get the number of asymmetric and transitive graphs
no_asymmetric = len(df[df['redundancy'] == 0])
no_transitive = len(df[df['redundancy'] == 1])
ax1.set_xlabel("$r_G'$")
# ax1.set_xlabel("$r\'_G$")
ax1.set_ylabel("Frequency")
# Add these numbers with bar markers
plt.plot([0, 1], [no_asymmetric, no_transitive], 'b_')
# Add ticks for those two values
yticks = list(ax1.get_yticks())
ax1.yaxis.set_ticks(yticks + [no_transitive, no_asymmetric])
# Use the tick positions as labels, except for the two new ticks
ax1.yaxis.set_ticklabels(list(map(int, yticks)) + ['', ''])
# Add annotations for the asymmetric/transitive number of graphs
arrowprops = {'color': 'black', 'arrowstyle': '-|>', 'relpos': (0.3, 0.5)}
plt.annotate('Transitive graphs ({})'.format(no_transitive), xy=(1, no_transitive), xytext=(0.6, 4*no_transitive),
arrowprops=arrowprops, size=params["axes.labelsize"])
arrowprops['relpos'] = (0.7, 0.5)
plt.annotate('Asymmetric graphs ({})'.format(no_asymmetric), xy=(0, no_asymmetric), xytext=(0.1, 0.7*no_asymmetric),
arrowprops=arrowprops, size=params["axes.labelsize"])
# Show tick labels on the right
ax1.tick_params(labelright=True)
ax2 = plt.subplot2grid((3, 1), (2, 0))
plt.boxplot(df[~df['num_orbits'].isnull()]['redundancy'], vert=False, widths=.8)
ax2.yaxis.set_ticklabels([])
ax2.set_xlim(-.02, 1.02)
ax2.set_ylim(.4, 1.6)
ax2.set_xlabel("Box-plot for all graphs")
fig.tight_layout()
plt.savefig(os.path.join(args.target_dir, 'networkrepository_hist_redundancy.pgf'))
plt.close()
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('infile_analysis', help='Path to the analysis csv file')
argparser.add_argument('infile_stats', help='Path to the stats csv file')
argparser.add_argument('target_dir',
help='Path to an (existing and writable!) directory to dump output',
type=PathType)
argparser.add_argument('--keep_duplicates', help='Keep duplicates (identified by name)',
type=bool, default=False, nargs='?', const=True)
args = argparser.parse_args()
global DROP_DUPLICATES
DROP_DUPLICATES = not args.keep_duplicates
# =================================
# Data loading
# =================================
df_a = pd.read_csv(args.infile_analysis, index_col=0)
df_s = pd.read_csv(args.infile_stats, index_col=0)  # api: pandas.read_csv

import pandas as pd
import numpy as np
import xgboost as xgb
import random
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve
# from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics  # note: sklearn.cross_validation was removed in newer scikit-learn and is not used below
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.pylab as plt
from sklearn import metrics
from matplotlib.pylab import rcParams
if __name__ == '__main__':
rcParams['figure.figsize'] = 12, 4
def data_balance(file_dataframe, balance_type, propotion_del):
type_list = file_dataframe["service_type_encode"].tolist()
site_sample = []
for i in range(len(balance_type)):
site = [j for j in range(len(type_list)) if type_list[j] == balance_type[i]]
num = round(len(site) * propotion_del[i])
site_sample += random.sample(site, num)
site_total = [k for k in range(len(type_list))]
for m in site_sample:
site_total.remove(m)
balance_data = file_dataframe.iloc[site_total]
return balance_data
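# Illustrative sketch of data_balance on a toy frame (class labels and proportions are made up):
# half of the rows of class 0 are randomly dropped, the other classes are kept untouched.
def _demo_data_balance():
    toy = pd.DataFrame({'service_type_encode': [0, 0, 0, 0, 1, 1, 2, 2]})
    return data_balance(toy, balance_type=[0], propotion_del=[0.5])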
def F1_score(confusion_max):
precision = []
recall = []
F1 = []
class_num = len(confusion_max)
for i in range(class_num):
temp_row = confusion_max[i]
TP = temp_row[i]
FN_sum = sum(temp_row)
temp_column = confusion_max[:, i]
FP_sum = sum(temp_column)
pre = TP / max(FP_sum, 1)
rec = TP / max(FN_sum, 1)
f1 = (2 * pre * rec) / max((pre + rec), 1)
F1.append(f1)
precision.append(pre)
recall.append(rec)
print("F1")
print(F1)
print("precision")
print(precision)
print("recall")
print(recall)
F_score = ((1 / len(F1)) * sum(F1)) ** 2
return F_score
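# Illustrative sketch of F1_score on a small confusion matrix (rows = true class,
# columns = predicted class; the counts are made up):
def _demo_f1_score():
    cm = np.array([[8, 2],
                   [1, 9]])
    return F1_score(cm)  # squared macro-averaged F1, roughly 0.72 for these counts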
def decode(encode_list):
final_re = []
for i in encode_list:
if i == 8:
final_re.append(90063345)
if i == 9:
final_re.append(90109916)
if i == 10:
final_re.append(90155946)
return final_re
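# Illustrative sketch of decode: the encoded labels 8/9/10 map back to the original
# service codes listed in the function above.
def _demo_decode():
    return decode([8, 10, 9])  # -> [90063345, 90155946, 90109916]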
raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\class_1_train_new.csv",
encoding="utf-8", low_memory=False)
# raw_data = data_balance(raw_data, [0, 1, 6], [0.7, 0.4, 0.2])
# num_total = len(raw_data)
# random_site = random.sample(range(num_total), round(num_total*0.3))
# raw_data = raw_data.iloc[random_site]
#
# para_list = ['is_mix_service',
# '1_total_fee_norm', '2_total_fee_norm', '3_total_fee_norm', '4_total_fee_norm',
# 'many_over_bill', 'contract_type',
# 'is_promise_low_consume', 'net_service', 'pay_times',
# 'gender',
# 'complaint_level',
# 'online_time_norm', 'fee_mean_norm', 'fee_std_norm',
# 'fee_fluctuate_norm', 'month_traffic_norm', 'contract_time_norm', 'pay_num_norm',
# 'last_month_traffic_norm', 'local_trafffic_month_norm', 'local_caller_time_norm',
# 'service1_caller_time_norm', 'service2_caller_time_norm', 'age_norm', 'former_complaint_num_norm',
# 'former_complaint_fee_norm', 'fee_mean_2_norm', 'service_caller_time_fluctuate_norm',
# 'month_traffic_precentage', 'contract_time_precentage',
# 'pay_times_precentage', 'pay_num_precentage', 'last_month_traffic_precentage',
# 'local_trafffic_month_precentage', 'local_caller_time_precentage', 'service1_caller_time_precentage',
# 'service2_caller_time_precentage',
# 'user_id'
# ]
# par_list = ['month_traffic_norm', '3_total_fee_norm', '1_total_fee_norm', 'fee_std_norm', '2_total_fee_norm',
# 'service2_caller_time_norm', 'online_time_norm', 'local_trafffic_month_norm', '4_total_fee_norm',
# 'fee_fluctuate_norm', 'fee_mean_2_norm', 'service_caller_time_fluctuate_norm', 'local_caller_time_norm',
# 'fee_mean_norm', 'last_month_traffic_norm', 'age_norm', 'pay_num_norm', 'contract_type',
# 'contract_time_norm', 'service1_caller_time_norm']
# para_list = ['is_mix_service',
# '1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee',
# 'many_over_bill', 'contract_type',
# 'is_promise_low_consume', 'net_service', 'pay_times',
# 'gender',
# 'complaint_level',
# 'online_time', 'fee_mean', 'fee_std',
# 'fee_fluctuate', 'month_traffic', 'contract_time', 'pay_num',
# 'last_month_traffic', 'local_trafffic_month', 'local_caller_time',
# 'service1_caller_time', 'service2_caller_time', 'age_norm', 'former_complaint_num_norm',
# 'former_complaint_fee', 'fee_mean_2', 'service_caller_time_fluctuate',
# 'month_traffic_precentage', 'contract_time_precentage',
# 'pay_times_precentage', 'pay_num_precentage', 'last_month_traffic_precentage',
# 'local_trafffic_month_precentage', 'local_caller_time_precentage', 'service1_caller_time_precentage',
# 'service2_caller_time_precentage',
# 'user_id'
# ]
para_list = ['is_mix_service', 'online_time', '1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee',
'1_total_fee_norm', '2_total_fee_norm', '3_total_fee_norm', '4_total_fee_norm',
'month_traffic', 'many_over_bill', 'contract_type', 'contract_time',
'is_promise_low_consume', 'net_service', 'pay_times', 'pay_num', 'last_month_traffic',
'local_trafffic_month', 'local_caller_time', 'service1_caller_time', 'service2_caller_time', 'gender',
'age', 'complaint_level', 'former_complaint_num', 'former_complaint_fee',
'fee_mean', 'fee_std', 'fee_fluctuate', 'fee_mean_2',
'service_caller_time_fluctuate', 'online_time_norm', 'fee_mean_norm', 'fee_std_norm',
'fee_fluctuate_norm', 'month_traffic_norm', 'contract_time_norm', 'pay_num_norm',
'last_month_traffic_norm', 'local_trafffic_month_norm', 'local_caller_time_norm',
'service1_caller_time_norm', 'service2_caller_time_norm', 'age_norm', 'former_complaint_num_norm',
'former_complaint_fee_norm', 'fee_mean_2_norm', 'service_caller_time_fluctuate_norm',
'user_id']
# para_list = ['1_total_fee', '3_total_fee', 'month_traffic', '2_total_fee', 'online_time', '4_total_fee',
# 'service2_caller_time', 'last_month_traffic', 'local_trafffic_month', 'fee_std',
# 'service_caller_time_fluctuate', 'fee_fluctuate', 'fee_mean_2', 'pay_num', 'local_caller_time',
# 'fee_mean', 'age', 'contract_type', 'contract_time', 'service1_caller_time', '1_total_fee_norm',
# '2_total_fee_norm', '3_total_fee_norm', '4_total_fee_norm', 'user_id']
label = raw_data["service_type_encode"].tolist()
par_list = para_list[:len(para_list) - 1]
label_train, label_test, data_train, data_test = train_test_split(label, raw_data[par_list], test_size=0.015)
eta_list = [0.1]
# eta_ori =0.1
F1_best = 0
F_sc_list = []
for eta in eta_list:
m_class = xgb.XGBClassifier(learning_rate=eta, n_estimators=1500, max_depth=7, min_child_weight=6, gamma=0,
subsample=0.8, n_jobs=-1, colsample_bytree=0.8, objective='binary:logistic', seed=0)
# train the model
m_class.fit(data_train, label_train)
test_8 = m_class.predict(data_test)
print("Test Accuracy : %.2f" % accuracy_score(label_test, test_8))
confusion_mat = confusion_matrix(label_test, test_8)
print("Test confusion matrix")
print(confusion_mat)
F_sc = F1_score(confusion_mat)
print("test F1_score")
print(F_sc)
F_sc_list.append(F_sc)
if F_sc > F1_best:
F1_best = F_sc
best_learning_rate = eta
print("best F1")
print(F1_best)
print("Best learning rate")
print(best_learning_rate)
# # plot training and test errors vs number of trees in ensemble
# plt.plot(eta_list, F_sc_list)
# plt.xlabel('Attributes')
# plt.ylabel('F1_score')
# # plot.ylim([0.0, 1.1*max(mseOob)])
# plt.show()
# test_2 = m_class.predict_proba(X_test)
# inspect the AUC evaluation metric
# inspect feature importances
attributes_name = np.array(par_list[:len(par_list)])
featureImportance = m_class.feature_importances_
idxSorted = np.argsort(featureImportance)
barPos = np.arange(idxSorted.shape[0]) + .5
plt.barh(barPos, featureImportance[idxSorted], align='center')
plt.yticks(barPos, attributes_name[idxSorted])
plt.xlabel('Variable Importance')
plt.show()
## AUC can only be computed for binary classification
##print "AUC Score (Train): %f" % metrics.roc_auc_score(y_test, test_2)
#
data_submit_raw = pd.read_csv(
r"E:\CCFDF\plansmatching\data\raw data\final_data\class_1_test_new.csv",
encoding="utf-8", low_memory=False)
data_submit = data_submit_raw[par_list]
submit_label_encode = m_class.predict(data_submit)
decode_list = decode(submit_label_encode)
user_id_4 = data_submit_raw["user_id"]
submit_result = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\result_test\XGBoost_optimization.csv",
encoding="utf-8", low_memory=False)
origin_id = submit_result["user_id"].tolist()
origin_result = submit_result["current_service"].tolist()
num_4 = len(user_id_4)
for i in range(num_4):
origin_result[origin_id.index(user_id_4[i])] = decode_list[i]
final_da = pd.DataFrame({"user_id": origin_id, "current_service": origin_result})  # api: pandas.DataFrame

import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model  # uncomment to use the GARCH helpers below (garch_fit_predict_volatility needs it)
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
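# Minimal usage sketch for compute_wap on a toy two-row order book
# (column names mirror the real book data; the prices and sizes are made up):
def _demo_compute_wap():
    book = pd.DataFrame({'bid_price1': [0.99, 1.00], 'ask_price1': [1.01, 1.02],
                         'bid_size1': [10, 5], 'ask_size1': [20, 5]})
    return compute_wap(book)  # one weighted-average price per row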
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
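# Minimal usage sketch for garch_fit_predict_volatility on synthetic returns; it assumes the
# `arch` package is installed and imported (see the commented import at the top of this file).
# The return series below is made up.
def _demo_garch_prediction():
    rng = np.random.default_rng(0)
    returns = pd.Series(rng.normal(0.0, 1e-4, size=600))
    return garch_fit_predict_volatility(returns)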
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
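# Minimal usage sketch for linearFit on a toy book snapshot (columns mirror the real book
# data; all numbers are made up). The result is the wap change per second over the window.
def _demo_linear_fit():
    book = pd.DataFrame({'seconds_in_bucket': [0, 300, 599],
                         'bid_price1': [0.99, 1.00, 1.01], 'ask_price1': [1.01, 1.02, 1.03],
                         'bid_size1': [10, 10, 10], 'ask_size1': [10, 10, 10]})
    return linearFit(book, last_min=10)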
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
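# Direct-call sketch of calc_rv_from_wap_numba (the `index` argument is unused in the body;
# it only exists for the groupby(engine='numba') aggregation used below); wap values are made up.
def _demo_realized_vol():
    wap = np.array([1.000, 1.001, 0.999, 1.002])
    return calc_rv_from_wap_numba(wap, None)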
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # TODO: consider taking the absolute value here
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # TODO: consider taking the absolute value here
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean,price_spread_mean,bid_spread_mean,ask_spread_mean,total_volume_mean,volume_imbalance_mean, wap_imbalance_sum,price_spread_sum,bid_spread_sum,ask_spread_sum,total_volume_sum,volume_imbalance_sum, wap_imbalance_std,price_spread_std,bid_spread_std,ask_spread_std,total_volume_std,volume_imbalance_std, wap_imbalance_max,price_spread_max,bid_spread_max,ask_spread_max,total_volume_max,volume_imbalance_max, wap_imbalance_min,price_spread_min,bid_spread_min,ask_spread_min,total_volume_min,volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
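# Minimal driver sketch (assumed example, not from the original script): every
# computeFeatures_* variant below shares this signature and returns one row of
# engineered features per 'row_id', formatted as '<stock_id>-<time_id>'.
#   train_ids = pd.read_csv('../input/optiver-realized-volatility-prediction/train.csv')['stock_id'].unique()
#   features = computeFeatures_wEntropy('kaggle', 'train', train_ids, datapath=None)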
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
# Entropy features are not computed in this variant, so nothing is concatenated for list_ent
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
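# Note on the stock-encoding block above (explanatory example): np.eye(len(all_stocks_ids))
# is an identity matrix whose i-th row one-hot encodes the i-th stock id, and the row_id
# prefix selects the matching row. E.g. with all_stocks_ids = np.array([0, 1, 31]), a
# row_id of '31-5' picks encoder[2] == [0., 0., 1.].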
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
return df_book_features
def computeFeatures_newTest_Laurent_wTrades(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
list_trades1, list_trades2 = [], []
list_vlad_book, list_vlad_trades = [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
trades_stock = load_trades_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
trades_stock = load_trades_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
#
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# This file is part of NeuralDB.
# See https://github.com/facebookresearch/NeuralDB for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from neuraldb.evaluation.scoring_functions import f1
def load_experiment(path):
running_score = defaultdict(lambda: defaultdict(int))
running_count = defaultdict(lambda: defaultdict(int))
print(path)
with open(path) as f:
for line in f:
instance = json.loads(line)
actual = instance["actual"]
prediction = instance["prediction"]
local_score = f1(set(actual), set(prediction))
# relation = instance["metadata"]["relation"]
# running_score["relation"][relation] += local_score
# running_count["relation"][relation] += 1
qtype = instance["metadata"]["type"]
if qtype in {"argmin", "argmax", "min", "max"}:
qtype = "minmax"
running_score["type"][qtype] += local_score
running_count["type"][qtype] += 1
running_score["all"][""] += local_score
running_count["all"][""] += 1
scores = {}
for k, v in running_score.items():
for attr, val in v.items():
score = (
running_score[k][attr] / running_count[k][attr]
if running_count[k][attr]
else 0
)
print(f"Running score: {k}\t{attr}\t\t{score}")
scores["_".join([k, attr])] = (
running_score[k][attr] / running_count[k][attr]
if running_count[k][attr]
else 0
)
return scores
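# Worked example of the per-line score (illustrative values, assuming f1 is the usual
# set-overlap F1): with actual = {a, b, c} and prediction = {b, c, d}, precision and
# recall are both 2/3, so the instance scores 2*(2/3)*(2/3)/((2/3)+(2/3)) ~= 0.667;
# the running dictionaries then average such scores per question type and over all rows.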
if __name__ == "__main__":
ndb_predictions = glob.glob(
"consolidated/work/v2.4_25/**/predictions.jsonl", recursive=True
)
all_experiments = []
for prediction in ndb_predictions:
print(prediction)
experiment = OrderedDict()
for element in prediction.split("/"):
if "," in element:
for kvp in element.split(","):
k, v = kvp.split("=", maxsplit=1)
experiment[k] = v
elif "-" in element:
for kvp in element.split(","):
k, v = kvp.split("-", maxsplit=1)
experiment[k] = v
# experiment["ssg"] = prediction.replace(".jsonl", "").rsplit("_", maxsplit=1)[1]
experiment["dataset"] = prediction.split("/")[2]
if "retriever" not in experiment:
experiment["retriever"] = ""
experiment["path"] = prediction
all_experiments.append(experiment)
print("Reading by experiment: \n\n\n")
for expt in all_experiments:
expt.update(load_experiment(expt["path"]))
del expt["path"]
frame = pd.DataFrame(all_experiments)
import pandas as pd
from pandas import Timestamp
import numpy as np
import pytest
import niimpy
from niimpy.util import TZ
df11 = pd.DataFrame(
{"user": ['wAzQNrdKZZax']*3 + ['Afxzi7oI0yyp']*3 + ['lb983ODxEFUD']*3,
"device": ['iMTB2alwYk1B']*3 + ['3Zkk0bhWmyny']*3 + ['n8rndM6J5_4B']*3,
"time": [1547709614.05, 1547709686.036, 1547709722.06, 1547710540.99, 1547710688.469, 1547711339.439, 1547711831.275, 1547711952.182, 1547712028.281 ],
"battery_level": [96, 96, 95, 95, 94, 93, 94, 94, 94],
"battery_status": ['3']*5 + ['2', '2', '3', '3'],
"battery_health": ['2']*9,
"battery_adaptor": ['0']*5+['1', '1', '0', '0'],
"datetime": ['2019-01-17 09:20:14.049999872+02:00', '2019-01-17 09:21:26.036000+02:00', '2019-01-17 09:22:02.060000+02:00',
'2019-01-17 09:35:40.990000128+02:00', '2019-01-17 09:38:08.469000192+02:00', '2019-01-17 09:48:59.438999808+02:00',
'2019-01-17 09:57:11.275000064+02:00', '2019-01-17 09:59:12.181999872+02:00', '2019-01-17 10:00:28.280999936+02:00']
})
df11['datetime'] = pd.to_datetime(df11['datetime'])
df11 = df11.set_index('datetime', drop=False)
def test_get_battery_data():
df=df11.copy()
battery = niimpy.battery.get_battery_data(df)
assert battery.loc[Timestamp('2019-01-17 09:20:14.049999872+02:00'), 'battery_level'] == 96
assert battery.loc[Timestamp('2019-01-17 09:21:26.036000+02:00'), 'battery_health'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:48:59.438999808+02:00'), 'battery_status'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:57:11.275000064+02:00'), 'battery_adaptor'] == '1'
def test_battery_occurrences():
df=df11.copy()
occurrences = niimpy.battery.battery_occurrences(df, hours=0, minutes=10)
assert occurrences.loc[Timestamp('2019-01-17 09:20:14.049999872+02:00'), 'occurrences'] == 2
assert occurrences.loc[Timestamp('2019-01-17 09:40:14.049999872+02:00'), 'occurrences'] == 1
def test_battery_gaps():
df=df11.copy()
gaps = niimpy.battery.battery_gaps(df)
assert gaps.delta.dtype == 'timedelta64[ns]'
assert gaps.tvalue.dtype == 'datetime64[ns, pytz.FixedOffset(120)]'
assert gaps.loc[Timestamp('2019-01-17 09:22:02.060000+02:00'), 'delta'] == pd.Timedelta('0 days 00:00:36.024000')
assert gaps.loc[Timestamp('2019-01-17 09:57:11.275000064+02:00'), 'tvalue'] == pd.Timestamp('2019-01-17 09:57:11.275000064+0200', tz='Europe/Helsinki')
import re
import nltk
import string
import math
from nltk.corpus import stopwords
from collections import Counter
import pandas as pd
from db.models import session, engine
from db.controller import Storage
import pymorphy2
import logging, os, sys
import datetime
from langdetect import detect, detect_langs
d = datetime.datetime.now()
dn = d.strftime("%Y-%m-%d")
FOLDER_PATH_SPAMFILTER = "d:\\CommuniGate Files\\SpamFilter\\"
FOLDER_PATH_LOG = "d:\\CommuniGate Files\\SpamFilter\\SpamFilterLog\\"
PATH_LOG = os.path.join(FOLDER_PATH_LOG, '{}.log'.format(dn))
logging.basicConfig(format='%(asctime)s.%(msecs)d %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG,
filename=PATH_LOG)
# pymorphy2 morphological analyzer
morph = pymorphy2.MorphAnalyzer()
storage = Storage(session)
csv_file = os.path.join(FOLDER_PATH_SPAMFILTER, 'spam_training.csv')
#csv_file = 'spam_training.csv'
# A letter that needs to be checked
#test_letter = "В магазине гора яблок. Купи семь килограмм и шоколадку"
#nltk.download('punkt')
#nltk.download('stopwords')
main_table = pd.DataFrame({
'word': [],
'spam': [],
'no_spam': [],
'probability_of_spam': [],
'probability_not_spam': []
})
garbagelist = [u'спасибо', u'пожалуйста', u'добрый', u'день', u'вечер',u'заявка', u'прошу', u'доброе', u'утро']
# Remove all punctuation, lowercase everything, split into tokens,
# and drop tokens that match the stopwords list.
def tokenize_me(file_text):
try:
tokens = nltk.word_tokenize(file_text.lower())
tokens = [i for i in tokens if (i not in string.punctuation)]
stop_words = stopwords.words('russian')
stop_words.extend(['что', 'это', 'так', 'вот', 'быть', 'как', 'в', '—', 'к', 'на'])
tokens = [i for i in tokens if (i not in stop_words)]
tokens = [i for i in tokens if (i not in garbagelist)]
tokens = [i.replace("«", "").replace("»", "") for i in tokens]
tokens = [i for i in tokens if not (len(i) == 1)]
tokens = [i for i in tokens if detect(i) == 'ru']
words = []
for word in tokens:
# Run a full morphological parse and take the first variant (nominally the "most likely" one, though not necessarily correct)
p = morph.parse(word)[0]
words.append(p.normal_form)
return words
except Exception as e:
logging.critical('Ошибка в spam_analysis.py функции tokenize_me: {}'.format(e))
# Function that computes the probability of word Xi occurring in a document of class Qk
def formula_1(N_ik, M, N_k):
#print("({} + {}) / ({} + {})".format(1, N_ik, M, N_k))
try:
return (1+N_ik)/(M+N_k)
except ZeroDivisionError as e:
logging.critical('Ошибка в spam_analysis.py функции formula_1, деления на ноль, вероятно таблица пуста: {}'.format(e))
except Exception as e:
logging.critical('Ошибка в spam_analysis.py функции formula_1: {}'.format(e))
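# Worked example (illustrative numbers): with a vocabulary of M = 4 unique words and
# N_k = 10 word occurrences in a class, a word seen N_ik = 2 times in that class gets
# (1 + 2) / (4 + 10) ~= 0.214, while an unseen word (N_ik = 0) still receives the
# non-zero Laplace-smoothed value 1 / 14 ~= 0.071.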
def training():
spam = []
not_spam = []
spam_words = []
not_spam_words = []
try:
# Training sample with spam letters:
for i in storage.select_mail(spam_or_no_spam=True):
spam.append(i.text)
# Training sample with non-spam letters:
for i in storage.select_mail(spam_or_no_spam=False):
not_spam.append(i.text)
# --------------- For spam ------------------
for line in spam:
spam_words.extend(tokenize_me(line))
# Build a table of unique words and their counts
unique_words = Counter(spam_words)
# --------------- For non-spam ------------------
for line in not_spam:
not_spam_words.extend(tokenize_me(line))
main_table['word'] = tuple(unique_words.keys())
main_table['spam'] = tuple(unique_words.values())
main_table['no_spam'] = [0 for x in range(len(tuple(unique_words.values())))]
for i in range(len(not_spam_words)):
# Create a boolean flag
need_word = True
for j in range(len(main_table.index)):
# If the "non-spam" word already exists, add 1 to its unique-word counter
if not_spam_words[i] == main_table.loc[j, 'word']:
main_table.loc[j, "no_spam"] = main_table.loc[j, "no_spam"] + 1
need_word = False
# If the word has not been seen yet, append it to the end of the data frame and create its counters
if need_word:
main_table.loc[len(main_table.index)] = [not_spam_words[i], 0, 1, pd.np.nan, pd.np.nan]
main_table.to_csv(csv_file, index=False)
except Exception as e:
logging.critical('Ошибка в spam_analysis.py функции training: {}'.format(e))
def analysis(main_table, test_letter):
try:
# Count the number of words from the training sample
quantity = len(main_table.index)
# --------------- For the test letter ------------------
test_letter = tokenize_me(test_letter)
for i in range(len(test_letter)):
# Reuse the same boolean flag instead of creating a new one
need_word = True
for j in range(len(main_table.index)):
# If a word from the test letter already exists in our sample, compute the probability for each class
if test_letter[i] == main_table.loc[j, 'word']:
main_table.loc[j, 'probability_of_spam'] = formula_1(main_table.loc[j, 'spam'], quantity, sum(main_table['spam']))
main_table.loc[j, 'probability_not_spam'] = formula_1(main_table.loc[j, 'no_spam'], quantity, sum(main_table['no_spam']))
need_word = False
# If the word is missing, append it to the end of the data frame and compute its spam / non-spam probabilities
if need_word:
main_table.loc[len(main_table.index)] = [test_letter[i], 0, 0,
formula_1(0, quantity, sum(main_table['spam'])),
formula_1(0, quantity, sum(main_table['no_spam']))]
# Accumulator for the "Spam" class score
probability_spam = 1
# Accumulator for the "Not spam" class score
probability_not_spam = 1
# Accumulator for the "Spam" class score (log variant)
probability_spam_log = 1
# Accumulator for the "Not spam" class score (log variant)
probability_not_spam_log = 1
for i in range(len(main_table.index)):
if not main_table.loc[i, 'probability_of_spam'] is None and not pd.isnull(
main_table.loc[i, 'probability_of_spam']):
# Step 1.1: build up the score that the letter is spam
probability_spam = probability_spam * main_table.loc[i, 'probability_of_spam']
if not main_table.loc[i, 'probability_not_spam'] is None and not pd.isnull(
main_table.loc[i, 'probability_not_spam']):
# Step 1.2: build up the score that the letter is not spam
probability_not_spam = probability_not_spam * main_table.loc[i, 'probability_not_spam']
#probability_spam = probability_spam * (2/4)
#probability_not_spam = probability_not_spam * (2/4)
#print(main_table)
# Step 2.1: compute the final score that the letter is spam
probability_spam = (main_table['spam'].sum() / (main_table['spam'].sum() + main_table['no_spam'].sum())) * probability_spam
# Step 2.2: compute the final score that the letter is not spam
probability_not_spam = (main_table['no_spam'].sum() / (main_table['spam'].sum() + main_table['no_spam'].sum())) * probability_not_spam
logging.debug("Оценка для категории «Спам»: {} Оценка для категории «Не спам»: {}".format(probability_spam, probability_not_spam))
logging.debug("Оценка для категории «Спам»: {} Оценка для категории «Не спам»: {}".format(math.log(probability_spam), math.log(probability_not_spam)))
# Whichever score is larger wins
if probability_spam > probability_not_spam:
spam_count = probability_spam
return True, spam_count
else:
spam_count = probability_not_spam
return False, spam_count
except Exception as e:
logging.critical('Ошибка в spam_analysis.py функции analysis: {}'.format(e))
return 'ERROR'
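# Numerical-stability note (a suggestion, not wired into the flow above; the names below
# are placeholders): multiplying hundreds of per-word probabilities underflows towards 0.0
# for long letters. The unused probability_*_log variables hint at the usual fix of
# comparing sums of logarithms instead, e.g.
#   log_spam = math.log(prior_spam) + sum(math.log(p) for p in spam_word_probs)
#   log_not_spam = math.log(prior_not_spam) + sum(math.log(p) for p in not_spam_word_probs)
#   is_spam = log_spam > log_not_spam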
def spam_analysis_main(test_letter):
try:
if detect(test_letter) == 'ru':
if not os.path.isfile(csv_file):
training()
df = pd.read_csv(csv_file)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score
from sklearn.dummy import DummyRegressor
"""
NAME
eda_tools
DESCRIPTION
This module provides functions to automate common procedures in EDA, model
preparation, and data visualization.
MODULE CONTENTS
inspect_dupes
inspect_nans
view_columns_w_many_nans
drop_columns_w_many_nans
histograms_numeric_columns
boxplots_categorical_columns
scatter_plots
heatmap_numeric_w_dependent_variable
high_corr_w_dependent_variable
high_corr_among_independent_variable
categorical_to_ordinal_transformer
transform_categorical_to_numercial
dummify_categorical_columns
conform_columns
viz_resids
print_error_metrics
"""
def inspect_dupes(df, dedupe=False):
'''
Checks for duplicate rows and drops them if the dedupe arg is set to True.
Args: dataframe, dedupe (bool)
'''
num_of_dupe = len(df[df.duplicated()])
if dedupe and num_of_dupe>0:
df.drop_duplicates(inplace=True)
print(f'Number of duplicates found: {num_of_dupe}')
return df
else:
print(f'Number of duplicates found: {num_of_dupe}')
return num_of_dupe
def inspect_nans(df):
'''
Check number and percentage of NaN
Arg: dataframe
'''
num_of_nan = df.isnull().sum().sum()
if num_of_nan > 0:
mask_total = df.isnull().sum().sort_values(ascending=False)
number = mask_total[mask_total > 0]
mask_percent = df.isnull().mean().sort_values(ascending=False)
percent = mask_percent[mask_percent > 0]
missing_data = pd.concat([number, percent], axis=1, keys=['Number_of_NaN', 'Percent_of_NaN'])
import sys,os,logging,random,pickle,datetime,joblib
import pandas as pd
import numpy as np
#Preprocessing
from sklearn.preprocessing._data import MinMaxScaler,RobustScaler
#Feature selection and dimensionality reduction
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.decomposition import PCA, NMF
#from mlxtend.feature_selection import SequentialFeatureSelector as SFS
#Machine Learning Models
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import tree
#Model selection and Validation
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
#Performance Metrics
from sklearn.metrics import classification_report, accuracy_score, make_scorer
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from sklearn.inspection import permutation_importance
#plot library
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.style.use("seaborn")
#dummy dataset
from sklearn.datasets import load_breast_cancer
#pdf template
import pdfkit
from pdf.summary_template import *
# set logging config
logging.basicConfig(
filename='machine_learning_report_log.log',
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
#VRE logger
from utils import logger
def save_pdf(path):
options = {
'disable-smart-shrinking': '',
'quiet': '',
'margin-top': '0.1in',
'margin-right': '0in',
'margin-bottom': '0in',
'margin-left': '0in',
}
pdfkit.from_string(body,path, options = options,css = os.path.join(os.getcwd(),'pdf','style.css'))
def generate_random_color():
c = tuple(np.random.randint(256, size=4)/255)
c_eush = (c[0],c[1]/5,1-c[0],np.min([c[3]*3, 1.0]))
return c_eush
def run(file_dataset = None,
classifier = 'logistic_regression',
max_features = 10,
n_folds = 5,
output_file = 'default_summary.pdf'):
logging.info('Running generate_model.py')
logging.info('Current working directory: {}'.format(os.getcwd()))
logging.info('classifier {}'.format(classifier))
logging.info('max_features {}'.format(max_features))
logging.info('n_folds {}'.format(n_folds))
logger.info('Running generate_model.py')
logger.info('Current working directory: {}'.format(os.getcwd()))
logger.info('classifier {}'.format(classifier))
logger.info('max_features {}'.format(max_features))
logger.info('n_folds {}'.format(n_folds))
classifiers = {
'logistic_regression': LogisticRegression(max_iter=2000,random_state = 42)
}
execution_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
results = pd.DataFrame(columns=('file_in',
'n_features',
'mean_acc',
'std_acc',
'mean_f1',
'mean_pre',
'mean_rec',
'clf'))
if file_dataset is not None:
        logging.info('file_dataset loaded correctly: {}'.format(file_dataset))
df =
|
pd.read_csv(file_dataset)
|
pandas.read_csv
|
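# Example invocation of run() with its default-style arguments
# (a sketch; 'my_dataset.csv' is a placeholder path, not a file shipped with this module):
if __name__ == '__main__':
    run(file_dataset='my_dataset.csv',
        classifier='logistic_regression',
        max_features=10,
        n_folds=5,
        output_file='default_summary.pdf')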
import warnings
from itertools import product
import numpy as np
import pandas as pd
import scipy.stats as sts
import statsmodels.api as sm
from minepy import MINE
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from catboost import CatBoostClassifier, CatBoostRegressor
from errors import ShapeError, IncorrectIndex
from bokeh.layouts import gridplot
from graphs.plot import heatmap, hbar, show
from stats_utils import array_drop_nan, array_fill_nan
def cosine(
y: np.ndarray,
x: np.ndarray = None,
only_endog: bool = True,
endog_index: int = 0,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculates cosine similarity
Returns cosine similarity between y and x if x is defined,
else if x is not defined y shape has to be greater than 1 (y~matrix)
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else creates numpy array
    from y. y has to be a matrix or pandas DataFrame
endog_index int: what column will be endog variable
only_endog bool: if True return similarity by endog variable. In this case
endog_variable defined by parameter endog_index
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
>>> print(cosine(y=y)) # matrix (1, 3)
>>> print(cosine(y=y, endog_index=1)) # matrix (1, 3)
>>> print(cosine(y=y[:, 0], x=y[:, 1:3] , endog_index=1)) # matrix (1, 3)
>>> print(cosine(y, only_endog=False)) # matrix (3, 3)
Return
_____________
Cosine similarity numpy array
"""
if not isinstance(endog_index, int):
raise TypeError("Endog index has to be int type in cosine")
y = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > y.shape[-1] or endog_index == y.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in cosine")
quadratic = np.transpose(y) @ y
sqrt_sum = np.sum(y * y, axis=0, keepdims=True) ** .5
similarities = quadratic / sqrt_sum / np.transpose(sqrt_sum)
distance = np.round(similarities, 3)
return distance[endog_index, :].reshape(1, -1) if only_endog else distance
def spearman(
y: np.ndarray,
x: np.ndarray = None,
only_endog: bool = True,
endog_index: int = 0,
stat_significant: bool = False,
alpha: float = 0.1,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculates Spearman rank correlation
Return Spearman rank correlation between y and x if x is defined,
else if x is not defined y shape has to be greater than 1 (y~matrix)
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a matrix, pandas DataFrame, or other matrix dtype
    stat_significant bool: if True return whether H1 is accepted at the given
    alpha significance level (based on p-values).
    alpha float: probability of rejecting the H0 hypothesis.
endog_index int: what column will be endog variable
only_endog bool: if True return similarity by endog variable. In this case
endog_variable is defined by parameter endog_index
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
>>> print(spearman(y=y)) # matrix (1, 3)
>>> print(spearman(y=y, endog_index=1)) # matrix (1, 3)
>>> print(spearman(y=y[:, 0], x=y[:, 1:3] , endog_index=1)) # matrix (1, 3)
>>> print(spearman(y, only_endog=False)) # matrix (3, 3)
Return
_____________
Spearman rank correlation numpy array
"""
if not isinstance(endog_index, int):
raise TypeError("Endog index has to be int type in spearman")
assert 0 < alpha < 1, 'Alpha must be in interval (0,1) in spearman'
y = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > y.shape[-1] or endog_index == y.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in spearman")
correlation = np.empty(shape=(y.shape[-1], y.shape[-1]))
num_index = 0 if not stat_significant else 1
for i, j in product(np.arange(correlation.shape[-1]), np.arange(correlation.shape[-1])):
correlation[i, j] = sts.spearmanr(y[:, i], y[:, j])[num_index]
if stat_significant:
correlation = correlation < alpha
return correlation[endog_index, :].reshape(1, -1) if only_endog else correlation
def pearson(
y: np.ndarray,
x: np.ndarray = None,
only_endog: bool = True,
endog_index: int = 0,
stat_significant: bool = False,
alpha: float = 0.1,
prepare_data: bool = True, **_
) -> np.ndarray:
"""Calculate Pearson correlation
Return Pearson correlation between y and x if x is defined,
else if x is not defined y shape has to be greater than 1 (y~matrix)
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtype float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a pandas DataFrame or other matrix dtype.
    stat_significant bool: if True return whether H1 is accepted at the given
    alpha significance level (based on p-values)
    alpha float: probability of rejecting the H0 hypothesis.
    endog_index int: what column will be the endog variable
only_endog bool: if True return similarity by endog variable. In this case
endog_variable is defined by parameter endog_index
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
>>> print(pearson(y=y)) # matrix (1, 3)
>>> print(pearson(y=y, endog_index=1)) # matrix (1, 3)
>>> print(pearson(y=y[:, 0], x=y[:, 1:3] , endog_index=1)) # matrix (1, 3)
>>> print(pearson(y, only_endog=False)) # matrix (3, 3)
Return
_____________
Pearson correlation numpy array
"""
if not isinstance(endog_index, int):
raise TypeError("Endog index has to be int type in pearson")
assert 0 < alpha < 1, 'Alpha must be in interval (0,1) in pearson'
y = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > y.shape[-1] or endog_index == y.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in spearman")
correlation = np.empty(shape=(y.shape[-1], y.shape[-1]))
num_index = 0 if not stat_significant else 1
for i, j in product(np.arange(correlation.shape[-1]), np.arange(correlation.shape[-1])):
correlation[i, j] = sts.pearsonr(y[:, i], y[:, j])[num_index]
if stat_significant:
correlation = correlation < alpha
return correlation[endog_index, :].reshape(1, -1) if only_endog else correlation
def kendall(
y: np.ndarray,
x: np.ndarray = None,
only_endog: bool = True,
endog_index: int = 0,
stat_significant: bool = False,
alpha: float = 0.1,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculate Kendall rank correlation
Return Kendall rank correlation between y and x if x is defined,
else if x is not defined y shape has to be greater than 1 (y~matrix)
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a pandas DataFrame or other matrix-like dtype
    stat_significant bool: if True return whether H1 is accepted at the given
    alpha significance level (based on p-values)
    alpha float: probability of rejecting the H0 hypothesis.
    endog_index int: what column will be the endog variable
only_endog bool: if True return similarity by endog variable. In this case
endog_variable is defined by parameter endog_index
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
>>> print(kendall(y=y)) # matrix (1, 3)
>>> print(kendall(y=y, endog_index=1)) # matrix (1, 3)
>>> print(kendall(y=y[:, 0], x=y[:, 1:3] , endog_index=1)) # matrix (1, 3)
>>> print(kendall(y, only_endog=False)) # matrix (3, 3)
Return
_____________
Kendall rank correlation numpy array
"""
if not isinstance(endog_index, int):
raise TypeError("Endog index has to be int type in kendall")
assert 0 < alpha < 1, 'Alpha must be in interval (0,1) in kendall'
y = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > y.shape[-1] or endog_index == y.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in kendall")
correlation = np.empty(shape=(y.shape[-1], y.shape[-1]))
num_index = 0 if not stat_significant else 1
for i, j in product(np.arange(correlation.shape[-1]), np.arange(correlation.shape[-1])):
correlation[i, j] = sts.kendalltau(y[:, i], y[:, j])[num_index]
if stat_significant:
correlation = correlation < alpha
return correlation[endog_index, :].reshape(1, -1) if only_endog else correlation
def mine(
y: np.ndarray,
x=None,
only_endog: bool = True,
endog_index: int = 0,
prepare_data: bool = True,
options=None,
**_
) -> np.ndarray:
"""Calculate Maximal Information Coefficient
Returns the Maximal Information Coefficient between y and x if x is defined,
else if x is not defined y shape has to be greater than 1 (y~matrix)
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a matrix or pandas DataFrame
    endog_index int: what column will be the endog variable
only_endog bool: if True return similarity by endog variable. In this case
endog_variable defined by parameter endog_index
options dict: settings for MINE
Note
____________
For more information of MINE method see
https://minepy.readthedocs.io/en/latest/
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
    >>> print(mine(y=y)) # matrix (1, 3)
    >>> print(mine(y=y, endog_index=1)) # matrix (1, 3)
    >>> print(mine(y=y[:, 0], x=y[:, 1:3], endog_index=1)) # matrix (1, 3)
    >>> print(mine(y, only_endog=False)) # matrix (3, 3)
Return
_____________
Maximal Information Coefficient numpy array
"""
if options is None:
options = {'alpha': 0.6, 'c': 15, 'est': 'mic_approx'}
if not isinstance(endog_index, int):
raise TypeError("Endog index has to be int type in kendall")
y = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > y.shape[-1] or endog_index == y.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in kendall")
correlation = np.empty(shape=(y.shape[-1], y.shape[-1]))
mine = MINE(**options)
for i, j in product(np.arange(correlation.shape[-1]), np.arange(correlation.shape[-1])):
mine.compute_score(y[:, i], y[:, j])
correlation[i, j] = mine.mic()
return correlation[endog_index, :].reshape(1, -1) if only_endog else correlation
def importance_forest(
y: np.ndarray,
x: np.ndarray = None,
type_model: str = 'regression',
endog_index: int = 0,
options: dict = None,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculate feature importance by ExtraTrees model
Return feature importance by defined endog variables of matrix (y, x)
if x is defined. Else if x is not defined y shape has to be greater than 1 (y~matrix)
and y contains all data.
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a matrix, pandas DataFrame, or other matrix-like dtype
endog_index int: what column will be endog
type_model str: classifier or regression
options dict (default None): parameters of ExtraTrees model
Example:
    >>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
    >>> print(importance_forest(y=y)) # matrix (1, 2)
    >>> print(importance_forest(y=y, endog_index=1, type_model='classifier')) # matrix (1, 2)
Return
_____________
ExtraTrees feature importance numpy array
"""
if options is None:
options = {'n_estimators': 10}
data = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > data.shape[-1] or endog_index == data.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in importance_forest")
index = np.arange(data.shape[-1])
y, x = data[:, endog_index], data[:, index[index != endog_index]]
engine = {
'regression': ExtraTreesRegressor,
'classifier': ExtraTreesClassifier
}
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
model = engine[type_model](**options)
model.fit(X=x, y=y)
return model.feature_importances_.reshape(1, -1)
def importance_catboost(
y: np.ndarray,
x: np.ndarray = None,
type_model: str = 'regression',
endog_index: int = 0,
options: dict = None,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculate feature importance using Catboost model
Return feature importance by defined endog variables of matrix (y, x)
if x is defined. Else if x is not defined y shape has to be greater than 1 (y~matrix)
and y contains all data.
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a matrix or pandas DataFrame
endog_index int: what column will be endog variables
type_model str: classifier or regression
options dict (default None): parameters of ExtraTrees model
Example:
    >>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
    >>> print(importance_catboost(y=y)) # matrix (1, 2)
    >>> print(importance_catboost(y=y, endog_index=1, type_model='classifier')) # matrix (1, 2)
Return
_____________
Catboost feature importance numpy array
"""
if options is None:
options = {'verbose': 0, 'iterations': 1000}
data = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > data.shape[-1] or endog_index == data.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in importance_catboost")
index = np.arange(data.shape[-1])
y, x = data[:, endog_index], data[:, index[index != endog_index]]
engine = {
'regression': CatBoostRegressor,
'classifier': CatBoostClassifier
}
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
model = engine[type_model](**options)
model.fit(X=x, y=y)
return model.feature_importances_.reshape(1, -1)
def importance_ols(
y: np.ndarray,
x: np.ndarray = None,
endog_index: int = 0,
prepare_data: bool = True,
**_
) -> np.ndarray:
"""Calculate p-values of coefs of LinearRegression (OLS method) model
Return values by defined endog variables of matrix (y, x)
if x is defined. Else if x is not defined y shape has to be greater than 1 (y~matrix)
    and y contains all data.
Parameters
_____________
prepare_data bool: create numpy array (matrix) with dtypes float32
    from concatenation of y and x by columns if x exists. Else create numpy array
    from y. y has to be a matrix or pandas DataFrame
    endog_index int: what column will be the endog variable
Example:
>>> y = sts.norm(0, 1).rvs((100, 3)) # matrix (100, 3)
>>> print(importance_ols(y=y)) # matrix (1, 2)
    >>> print(importance_ols(y=y[:,0], x=y[:,1:], endog_index=2)) # matrix (1, 2)
Return
_____________
OLS p-values of feature numpy array
"""
data = DataForFeatureSelection(y=y, x=x).prepare_data() if prepare_data else y
if endog_index > data.shape[-1] or endog_index == data.shape[-1]:
raise ShapeError("Endog index has to be lower num columns in importance_ols")
index = np.arange(data.shape[-1])
y, x = data[:, endog_index], data[:, index[index != endog_index]]
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
model = sm.OLS(endog=y, exog=sm.add_constant(x)).fit()
return model.pvalues[1:].reshape(1, -1)
def chi_test():
pass
class DataForFeatureSelection:
"""
This class is used inside FeatureSelection class in order
to stack y and x
Parameters
----------
y : array_like or pandas DataFrame/Series without nan-values.
x: array_like or pandas DataFrame/Series without nan-values.
"""
def __init__(self, *, y: np.ndarray or pd.DataFrame, x: np.ndarray or pd.DataFrame):
if x is not None:
assert y.shape[0] == x.shape[0], "Length of variables has to be equal in FeatureSelector"
self.y = np.float32(y) if isinstance(y, np.ndarray) else np.array(y, dtype=np.float32)
self.x = np.float32(x) if isinstance(x, np.ndarray) else np.array(x, dtype=np.float32)
def prepare_data(self):
if not np.isnan(self.x).all():
self.y = np.column_stack((self.y, self.x))
return self.y
class FeatureSelector:
"""
    This class measures the influence of
    variables on the other variables given in the parameters.
Parameters
----------
y : array_like or pandas DataFrame/Series with or without nan-values.
x: array_like or pandas DataFrame/Series with or without nan-values.
columns: list containing names of variables if x and y are np.ndarray
Note
----------
Example:
>>> train = sts.norm(loc=0, scale=1).rvs((100, 3))
>>> feature_selected = FeatureSelector(train)
>>> feature_selected.summary(fill_na=np.median, endog_index=1)
"""
DEFAULT_PARAMS = {
'fill_na': np.mean,
'dropna': None,
'endog_index': 0,
'to_dataframe': True,
'show_graph': False,
'_get_graph': False
}
def __init__(
self,
y: np.ndarray or pd.DataFrame,
x: np.ndarray or pd.DataFrame = None,
columns: list = None
):
self.data = DataForFeatureSelection(y=y, x=x).prepare_data()
self.columns = self._create_columns(columns, y, x)
def _update_default_params(self, **kwargs):
params = self.DEFAULT_PARAMS.copy()
if kwargs:
params.update(**kwargs)
return params
def cosine(
self,
sort_correlation: bool = True,
**kwargs
):
params = self._update_default_params(**kwargs)
y, endog_index, y_label, columns = self._get_data_label_columns(
fill_na=params["fill_na"],
dropna=params["dropna"],
endog_index=params['endog_index']
)
params.update(endog_index=endog_index)
similar = cosine(y=y, **params)
y_label = self._get_endog_label(similar, endog_index)
similar = self.make_sort_correlation(
data=pd.DataFrame(similar, index=[y_label], columns=columns),
sort_correlation=sort_correlation
)
label_to_plot = 'Data' if isinstance(y_label, list) else y_label
fig = self.make_plot(similar, title=f'Cosine importance of {label_to_plot}', **params)
similar = self._to_datafeame(similar, y_label, params["to_dataframe"])
if params['_get_graph']:
return fig
return similar
def mine(
self,
sort_correlation: bool = True,
**kwargs
):
params = self._update_default_params(**kwargs)
y, endog_index, y_label, columns = self._get_data_label_columns(
fill_na=params["fill_na"],
dropna=params["dropna"],
endog_index=params['endog_index']
)
params.update(endog_index=endog_index)
similar = mine(y=y, **params)
y_label = self._get_endog_label(similar, endog_index)
similar = self.make_sort_correlation(
data=
|
pd.DataFrame(similar, index=[y_label], columns=columns)
|
pandas.DataFrame
|
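# Comparing several of the similarity/correlation measures above on the same
# random matrix (a sketch; only functions defined in this module are used):
import scipy.stats as sts

if __name__ == '__main__':
    demo = sts.norm(0, 1).rvs((200, 4))  # 4 variables, column 0 treated as endog
    for name, measure in [('cosine', cosine), ('pearson', pearson),
                          ('spearman', spearman), ('kendall', kendall)]:
        print(name, measure(y=demo, endog_index=0))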
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from collections import Counter
from typing import Dict
from functools import partial
import scipy as sp
from scipy.stats import mode
from sklearn.ensemble import ExtraTreesRegressor
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
from ngboost.scores import MLE
from ngboost.learners import default_tree_learner
from ngboost import NGBRegressor
from mlxtend.regressor import StackingCVRegressor, LinearRegression
# pip install --upgrade git+https://github.com/stanfordmlgroup/ngboost.git
pd.set_option('display.max_columns', 1000)
SEED = 42
def read_data():
print('Reading train.csv file....')
train = pd.read_csv('train.csv')
    print('Training.csv file has {} rows and {} columns'.format(train.shape[0], train.shape[1]))
    print('Reading test.csv file....')
    test = pd.read_csv('test.csv')
    print('Test.csv file has {} rows and {} columns'.format(test.shape[0], test.shape[1]))
    print('Reading train_labels.csv file....')
    train_labels = pd.read_csv('train_labels.csv')
    print('Train_labels.csv file has {} rows and {} columns'.format(train_labels.shape[0], train_labels.shape[1]))
    print('Reading specs.csv file....')
    specs = pd.read_csv('specs.csv')
    print('Specs.csv file has {} rows and {} columns'.format(specs.shape[0], specs.shape[1]))
    print('Reading sample_submission.csv file....')
    sample_submission = pd.read_csv('sample_submission.csv')
    print('Sample_submission.csv file has {} rows and {} columns'.format(sample_submission.shape[0], sample_submission.shape[1]))
return train, test, train_labels, specs, sample_submission
def encode_title(train, test, train_labels):
# encode title
train['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), train['title'], train['event_code']))
test['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), test['title'], test['event_code']))
all_title_event_code = list(set(train["title_event_code"].unique()).union(test["title_event_code"].unique()))
# make a list with all the unique 'titles' from the train and test set
list_of_user_activities = list(set(train['title'].unique()).union(set(test['title'].unique())))
# make a list with all the unique 'event_code' from the train and test set
list_of_event_code = list(set(train['event_code'].unique()).union(set(test['event_code'].unique())))
list_of_event_id = list(set(train['event_id'].unique()).union(set(test['event_id'].unique())))
# make a list with all the unique worlds from the train and test set
list_of_worlds = list(set(train['world'].unique()).union(set(test['world'].unique())))
# create a dictionary numerating the titles
activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))
activities_labels = dict(zip(np.arange(len(list_of_user_activities)), list_of_user_activities))
activities_world = dict(zip(list_of_worlds, np.arange(len(list_of_worlds))))
assess_titles = list(set(train[train['type'] == 'Assessment']['title'].value_counts().index).union(set(test[test['type'] == 'Assessment']['title'].value_counts().index)))
# replace the text titles with the number titles from the dict
train['title'] = train['title'].map(activities_map)
test['title'] = test['title'].map(activities_map)
train['world'] = train['world'].map(activities_world)
test['world'] = test['world'].map(activities_world)
train_labels['title'] = train_labels['title'].map(activities_map)
win_code = dict(zip(activities_map.values(), (4100*np.ones(len(activities_map))).astype('int')))
    # then, it sets one element, the 'Bird Measurer (Assessment)', to 4110, 10 more than the rest
win_code[activities_map['Bird Measurer (Assessment)']] = 4110
# convert text into datetime
train['timestamp'] = pd.to_datetime(train['timestamp'])
test['timestamp'] = pd.to_datetime(test['timestamp'])
return train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code
# this is the function that convert the raw data into processed features
def get_data(user_sample, test_set=False):
'''
    The user_sample is a DataFrame from train or test filtered down to a single
    installation_id
    The test_set parameter is related to the labels processing, which is only required
    if test_set=False
'''
# Constants and parameters declaration
last_activity = 0
user_activities_count = {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
# new features: time spent in each activity
last_session_time_sec = 0
accuracy_groups = {0:0, 1:0, 2:0, 3:0}
all_assessments = []
accumulated_accuracy_group = 0
accumulated_accuracy = 0
accumulated_correct_attempts = 0
accumulated_uncorrect_attempts = 0
accumulated_actions = 0
counter = 0
time_first_activity = float(user_sample['timestamp'].values[0])
durations = []
last_accuracy_title = {'acc_' + title: -1 for title in assess_titles}
event_code_count: Dict[str, int] = {ev: 0 for ev in list_of_event_code}
event_id_count: Dict[str, int] = {eve: 0 for eve in list_of_event_id}
title_count: Dict[str, int] = {eve: 0 for eve in activities_labels.values()}
title_event_code_count: Dict[str, int] = {t_eve: 0 for t_eve in all_title_event_code}
# last features
sessions_count = 0
    # iterates through each session of one installation_id
for i, session in user_sample.groupby('game_session', sort=False):
# i = game_session_id
# session is a DataFrame that contain only one game_session
# get some sessions information
session_type = session['type'].iloc[0]
session_title = session['title'].iloc[0]
session_title_text = activities_labels[session_title]
        # for each assessment, and only this kind of session, the features below are processed
        # and a record is generated
if (session_type == 'Assessment') & (test_set or len(session)>1):
# search for event_code 4100, that represents the assessments trial
all_attempts = session.query(f'event_code == {win_code[session_title]}')
# then, check the numbers of wins and the number of losses
true_attempts = all_attempts['event_data'].str.contains('true').sum()
false_attempts = all_attempts['event_data'].str.contains('false').sum()
            # copy a dict to use as feature template, it's initialized with some items:
# {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
features = user_activities_count.copy()
features.update(last_accuracy_title.copy())
features.update(event_code_count.copy())
features.update(event_id_count.copy())
features.update(title_count.copy())
features.update(title_event_code_count.copy())
features.update(last_accuracy_title.copy())
features['installation_session_count'] = sessions_count
variety_features = [('var_event_code', event_code_count),
('var_event_id', event_id_count),
('var_title', title_count),
('var_title_event_code', title_event_code_count)]
for name, dict_counts in variety_features:
arr = np.array(list(dict_counts.values()))
features[name] = np.count_nonzero(arr)
# get installation_id for aggregated features
features['installation_id'] = session['installation_id'].iloc[-1]
# add title as feature, remembering that title represents the name of the game
features['session_title'] = session['title'].iloc[0]
# the 4 lines below add the feature of the history of the trials of this player
# this is based on the all time attempts so far, at the moment of this assessment
features['accumulated_correct_attempts'] = accumulated_correct_attempts
features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts
accumulated_correct_attempts += true_attempts
accumulated_uncorrect_attempts += false_attempts
# the time spent in the app so far
if durations == []:
features['duration_mean'] = 0
features['duration_std'] = 0
else:
features['duration_mean'] = np.mean(durations)
features['duration_std'] = np.std(durations)
durations.append((session.iloc[-1, 2] - session.iloc[0, 2] ).seconds)
            # the accuracy is the all-time wins divided by the all-time attempts
features['accumulated_accuracy'] = accumulated_accuracy/counter if counter > 0 else 0
accuracy = true_attempts/(true_attempts+false_attempts) if (true_attempts+false_attempts) != 0 else 0
accumulated_accuracy += accuracy
last_accuracy_title['acc_' + session_title_text] = accuracy
# a feature of the current accuracy categorized
# it is a counter of how many times this player was in each accuracy group
if accuracy == 0:
features['accuracy_group'] = 0
elif accuracy == 1:
features['accuracy_group'] = 3
elif accuracy == 0.5:
features['accuracy_group'] = 2
else:
features['accuracy_group'] = 1
features.update(accuracy_groups)
accuracy_groups[features['accuracy_group']] += 1
# mean of the all accuracy groups of this player
features['accumulated_accuracy_group'] = accumulated_accuracy_group/counter if counter > 0 else 0
accumulated_accuracy_group += features['accuracy_group']
# how many actions the player has done so far, it is initialized as 0 and updated some lines below
features['accumulated_actions'] = accumulated_actions
            # there are some conditions that allow these features to be inserted in the datasets
            # if it's a test set, all sessions belong to the final dataset
            # if it's a train set, it needs to pass this clause: session.query(f'event_code == {win_code[session_title]}')
            # that means an event_code 4100 or 4110 must exist
if test_set:
all_assessments.append(features)
elif true_attempts+false_attempts > 0:
all_assessments.append(features)
counter += 1
sessions_count += 1
        # this piece counts how many actions were made in each event_code so far
def update_counters(counter: dict, col: str):
num_of_session_count = Counter(session[col])
for k in num_of_session_count.keys():
x = k
if col == 'title':
x = activities_labels[k]
counter[x] += num_of_session_count[k]
return counter
event_code_count = update_counters(event_code_count, "event_code")
event_id_count = update_counters(event_id_count, "event_id")
title_count = update_counters(title_count, 'title')
title_event_code_count = update_counters(title_event_code_count, 'title_event_code')
# counts how many actions the player has done so far, used in the feature of the same name
accumulated_actions += len(session)
if last_activity != session_type:
user_activities_count[session_type] += 1
            last_activity = session_type
    # if it's the test_set, only the last assessment must be predicted, the previous ones are scrapped
if test_set:
return all_assessments[-1]
    # in the train_set, all assessments go to the dataset
return all_assessments
'''
    The user_sample is a DataFrame from train or test filtered down to a single
    installation_id
    The test_set parameter is related to the labels processing, which is only required
    if test_set=False
'''
# Constants and parameters declaration
last_activity = 0
user_activities_count = {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
# new features: time spent in each activity
event_code_count = {eve: 0 for eve in list_of_event_code}
last_session_time_sec = 0
accuracy_groups = {0:0, 1:0, 2:0, 3:0}
all_assessments = []
accumulated_accuracy_group = 0
accumulated_accuracy = 0
accumulated_correct_attempts = 0
accumulated_uncorrect_attempts = 0
accumulated_actions = 0
counter = 0
time_first_activity = float(user_sample['timestamp'].values[0])
durations = []
    # iterates through each session of one installation_id
for i, session in user_sample.groupby('game_session', sort=False):
# i = game_session_id
# session is a DataFrame that contain only one game_session
# get some sessions information
session_type = session['type'].iloc[0]
session_title = session['title'].iloc[0]
        # for each assessment, and only this kind of session, the features below are processed
        # and a record is generated
if (session_type == 'Assessment') & (test_set or len(session)>1):
# search for event_code 4100, that represents the assessments trial
all_attempts = session.query(f'event_code == {win_code[session_title]}')
# then, check the numbers of wins and the number of losses
true_attempts = all_attempts['event_data'].str.contains('true').sum()
false_attempts = all_attempts['event_data'].str.contains('false').sum()
            # copy a dict to use as feature template, it's initialized with some items:
# {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
features = user_activities_count.copy()
features.update(event_code_count.copy())
# get installation_id for aggregated features
features['installation_id'] = session['installation_id'].iloc[-1]
# add title as feature, remembering that title represents the name of the game
features['session_title'] = session['title'].iloc[0]
# the 4 lines below add the feature of the history of the trials of this player
# this is based on the all time attempts so far, at the moment of this assessment
features['accumulated_correct_attempts'] = accumulated_correct_attempts
features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts
accumulated_correct_attempts += true_attempts
accumulated_uncorrect_attempts += false_attempts
# the time spent in the app so far
if durations == []:
features['duration_mean'] = 0
else:
features['duration_mean'] = np.mean(durations)
durations.append((session.iloc[-1, 2] - session.iloc[0, 2] ).seconds)
            # the accuracy is the all-time wins divided by the all-time attempts
features['accumulated_accuracy'] = accumulated_accuracy/counter if counter > 0 else 0
accuracy = true_attempts/(true_attempts+false_attempts) if (true_attempts+false_attempts) != 0 else 0
accumulated_accuracy += accuracy
# a feature of the current accuracy categorized
# it is a counter of how many times this player was in each accuracy group
if accuracy == 0:
features['accuracy_group'] = 0
elif accuracy == 1:
features['accuracy_group'] = 3
elif accuracy == 0.5:
features['accuracy_group'] = 2
else:
features['accuracy_group'] = 1
features.update(accuracy_groups)
accuracy_groups[features['accuracy_group']] += 1
# mean of the all accuracy groups of this player
features['accumulated_accuracy_group'] = accumulated_accuracy_group/counter if counter > 0 else 0
accumulated_accuracy_group += features['accuracy_group']
# how many actions the player has done so far, it is initialized as 0 and updated some lines below
features['accumulated_actions'] = accumulated_actions
            # there are some conditions that allow these features to be inserted in the datasets
            # if it's a test set, all sessions belong to the final dataset
            # if it's a train set, it needs to pass this clause: session.query(f'event_code == {win_code[session_title]}')
            # that means an event_code 4100 or 4110 must exist
if test_set:
all_assessments.append(features)
elif true_attempts+false_attempts > 0:
all_assessments.append(features)
counter += 1
        # this piece counts how many actions were made in each event_code so far
n_of_event_codes = Counter(session['event_code'])
for key in n_of_event_codes.keys():
event_code_count[key] += n_of_event_codes[key]
# counts how many actions the player has done so far, used in the feature of the same name
accumulated_actions += len(session)
if last_activity != session_type:
user_activities_count[session_type] += 1
            last_activity = session_type
    # if it's the test_set, only the last assessment must be predicted, the previous ones are scrapped
if test_set:
return all_assessments[-1]
    # in the train_set, all assessments go to the dataset
return all_assessments
def get_train_and_test(train, test):
compiled_train = []
compiled_test = []
for i, (ins_id, user_sample) in enumerate(train.groupby('installation_id', sort = False)):
compiled_train += get_data(user_sample)
for ins_id, user_sample in test.groupby('installation_id', sort = False):
test_data = get_data(user_sample, test_set = True)
compiled_test.append(test_data)
reduce_train =
|
pd.DataFrame(compiled_train)
|
pandas.DataFrame
|
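# The accuracy -> accuracy_group mapping that appears twice inside get_data above,
# factored into a small helper (a sketch; the thresholds mirror the if/elif chain):
def accuracy_to_group(accuracy: float) -> int:
    """Map accuracy = wins / attempts to the 0-3 accuracy_group label."""
    if accuracy == 0:
        return 0   # no correct attempt
    if accuracy == 1:
        return 3   # no failed attempts, solved on the first try
    if accuracy == 0.5:
        return 2   # one failed attempt before the correct one
    return 1       # several failed attempts before the correct one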
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes
def test_can_cast_safely_same_kind():
# 'i' -> 'i'
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="int64")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**31], dtype="int64")._column
assert not data.can_cast_safely(to_dtype)
# 'u' -> 'u'
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("uint64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint64")._column
to_dtype = np.dtype("uint32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**33], dtype="uint64")._column
assert not data.can_cast_safely(to_dtype)
# 'f' -> 'f'
data = cudf.Series([np.inf, 1.0], dtype="float64")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series(
[np.finfo("float32").max * 2, 1.0], dtype="float64"
)._column
to_dtype = np.dtype("float32")
assert not data.can_cast_safely(to_dtype)
def test_can_cast_safely_mixed_kind():
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="int32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="uint32")._column
assert not data.can_cast_safely(to_dtype)
to_dtype = np.dtype("float64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1.0, 2.0, 3.0], dtype="float32")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
# not integer float
data = cudf.Series([1.0, 2.0, 3.5], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([10.0, 11.0, 2000.0], dtype="float64")._column
assert data.can_cast_safely(to_dtype)
# float out of int range
data = cudf.Series([1.0, 2.0, 1.0 * (2**31)], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
# negative signed integers casting to unsigned integers
data = cudf.Series([-1, 0, 1], dtype="int32")._column
to_dtype = np.dtype("uint32")
assert not data.can_cast_safely(to_dtype)
def test_to_pandas_nullable_integer():
gsr_not_null = cudf.Series([1, 2, 3])
gsr_has_null = cudf.Series([1, 2, None])
psr_not_null = pd.Series([1, 2, 3], dtype="int64")
psr_has_null =
|
pd.Series([1, 2, None], dtype="Int64")
|
pandas.Series
|
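# Why the test above asks for the nullable "Int64" dtype on the pandas side
# (a sketch in plain pandas): a None in a default integer column silently upcasts
# to float64, while the nullable extension dtype keeps integers plus pd.NA.
import pandas as pd

upcast = pd.Series([1, 2, None])                    # dtype: float64, None -> NaN
nullable = pd.Series([1, 2, None], dtype="Int64")   # dtype: Int64, None -> <NA>
print(upcast.dtype, nullable.dtype)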
import random
import time
import pandas as pd
import numpy as np
from random import seed, randint
import pathlib
import requests
import zipfile
import io
from datetime import datetime, date, timedelta
import re
from controls import urban, NUTS118NM, NUTS118NM_118CD
# get relative data folder
PATH = pathlib.Path().parent
DATA_PATH = PATH.joinpath("data").resolve()
FILE_PATH = DATA_PATH.joinpath('postcodes.csv').resolve()
FILE_PATH_1 = DATA_PATH.joinpath('conditions.csv').resolve()
FILE_PATH_2 = DATA_PATH.joinpath(
'December_2018__to_NUTS3_to_NUTS2_to_NUTS1.csv').resolve()
regex = "^(?:(?P<a1>[Gg][Ii][Rr])(?P<d1>) (?P<s1>0)(?P<u1>[Aa]{2}))|(?:(?:(?:(?P<a2>[A-Za-z])(?P<d2>[0-9]{1,2}))|(?:(?:(?P<a3>[A-Za-z][A-Ha-hJ-Yj-y])(?P<d3>[0-9]{1,2}))|(?:(?:(?P<a4>[A-Za-z])(?P<d4>[0-9][A-Za-z]))|(?:(?P<a5>[A-Za-z][A-Ha-hJ-Yj-y])(?P<d5>[0-9]?[A-Za-z]))))) (?P<s2>[0-9])(?P<u2>[A-Za-z]{2}))$"
conditions = ['seizures',
'hypertension',
'parkinsons',
'diabetes',
'mentalIllness',
'alzheimers',
'irregularHeart',
'bradicardia',
'tachycardia',
'respiratory',
'multipleSclerosis',
'miscNeural',
'hypotension',
'parkinsonDisease',
'mentalHealthIssue',
'paralysis',
'cancer',
'miscHeart',
'bradycardia',
'brainInjury',
'stroke',
'sleepApnea',
'arthritis',
'renalUrinary',
'insomnia',
'dementia',
'otherHeartCondition']
def calculate_age(born):
today = date.today()
try:
birthday = born.replace(year=today.year)
except ValueError: # raised when birth date is February 29 and the current year is not a leap year
birthday = born.replace(year=today.year, month=born.month+1, day=1)
if birthday > today:
return today.year - born.year - 1
else:
return today.year - born.year
def gen_gender(number):
gender = ['male', 'female', 'other']
cond = []
for _ in range(number):
value = 1
cond.append(random.sample(gender, value))
return pd.DataFrame(cond, columns=['gender'])
def gen_dob(begin, end, sample_size):
dob = []
for i in range(sample_size):
start_date = date(begin, 1, 1)
end_date = date(end, 2, 1)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = random.randrange(days_between_dates)
random_date = start_date + timedelta(days=random_number_of_days)
#date_time_obj = datetime.strptime(random_date, '%Y-%m-%d')
dob.append(random_date)
df = pd.to_datetime(dob)
return pd.DataFrame(df)
def addNumberToDate(date, sample_size):
number_to_add = np.random.randint(50, 1000, size=sample_size)
exit = pd.to_datetime(date) + pd.to_timedelta(number_to_add, unit='D')
return exit
def bmi_func(weight, height):
height = height/100
bmi = weight / height**2
return bmi
def random_date(start, end, prop):
return str_time_prop(start, end, '%m/%d/%Y', prop)
def generate_dates(start, end, prop, number_generate):
users_dob = []
column_names = ['dob']
df = pd.DataFrame(columns=column_names)
for i in range(number_generate):
dob = random_date(start, end, random.random())
users_dob.append(dob)
return pd.DataFrame(users_dob, columns=['dob'])
def gen_weight_height(start, end, number, col):
number_gen = []
for _ in range(number):
value = randint(start, end)
number_gen.append(value)
return pd.DataFrame(number_gen, columns=[col])
def gen_conditions(conditions, number):
cond = []
for _ in range(number):
value = randint(0, 5)
if value == 0:
cond.append({'conditions': ['no conditions']})
else:
cond.append({'conditions': random.sample(conditions, value)})
return pd.DataFrame(cond, columns=['conditions'])
def gen_postcodes(postcodes, sample_size):
return pd.DataFrame(random.sample(postcodes, sample_size), columns=['postcode'])
def update_file():
'''
https://www.doogal.co.uk/postcodedownloads.php
'''
zip_file_url = 'https://www.doogal.co.uk/files/postcodes.zip'
r = requests.get(zip_file_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(DATA_PATH)
z.close()
data =
|
pd.read_csv(FILE_PATH, low_memory=False)
|
pandas.read_csv
|
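# Putting several of the generators above together into one synthetic cohort
# (a sketch; sample_size is arbitrary and no postcode lookup is performed):
sample_size = 5
cohort = pd.concat(
    [
        gen_gender(sample_size),
        gen_dob(1940, 2000, sample_size),
        gen_weight_height(45, 120, sample_size, 'weight'),
        gen_weight_height(140, 200, sample_size, 'height'),
        gen_conditions(conditions, sample_size),
    ],
    axis=1,
)
cohort['bmi'] = bmi_func(cohort['weight'], cohort['height'])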
import numpy as np
import csv
import pandas as pd
import matplotlib.pyplot as plt
import math
import tensorflow as tf
import seaborn as sns
import itertools
import operator
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.naive_bayes import GaussianNB, CategoricalNB
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, roc_auc_score, \
recall_score, precision_score, mean_squared_error
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import metrics, preprocessing, tree
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_classification
from sklearn.inspection import permutation_importance
# for data and modeling
# import keras
# from keras.callbacks import EarlyStopping
# from keras.wrappers.scikit_learn import KerasClassifier
# from keras.utils import np_utils
# from keras.models import Sequential
# from keras.layers import Dense, Dropout
# from tensorflow.keras import datasets, layers, models
from six import StringIO
from IPython.display import Image
import pydotplus
from ast import literal_eval
from collections import Counter
def heatmap_confmat(ytest, ypred, name):
labels = [0, 1]
conf_mat = confusion_matrix(ytest, ypred, labels=labels)
print(conf_mat)
# heatm = sns.heatmap(conf_mat, annot=True)
# print(heatm)
group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
group_counts = ["{0:0.0f}".format(value) for value in conf_mat.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in conf_mat.flatten() / np.sum(conf_mat)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
heat = sns.heatmap(conf_mat, annot=labels, fmt='', cmap='Blues')
heat.figure.savefig(name)
def plot_feature_importances(importance):
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.xlabel("Permutation importance")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("nb_heatmap_40.png")
def NaiveBayes(xtrain, ytrain, xtest, ytest, binary=False):
if binary:
nb = GaussianNB()
model = "GaussianNB"
else:
nb = CategoricalNB()
model = "CategoricalNB"
nb.fit(xtrain, ytrain)
nb.predict(xtest)
y_pred_nb = nb.predict(xtest)
y_prob_pred_nb = nb.predict_proba(xtest)
# how did our model perform?
count_misclassified = (ytest != y_pred_nb).sum()
print(model)
print("=" * 30)
print('Misclassified samples: {}'.format(count_misclassified))
accuracy = accuracy_score(ytest, y_pred_nb)
print('Accuracy: {:.5f}'.format(accuracy))
heatmap_confmat(ytest, y_pred_nb, "naive_bayes.png")
feature_importance_NB(nb, xtest, ytest)
print("Naive Bayes done")
def feature_importance_NB(model, xval, yval):
r = permutation_importance(model, xval, yval, n_repeats=30, random_state=0)
print(len(r))
imp = r.importances_mean
importance = np.add(imp[40:], imp[:40])
# importance = imp
# for i in r.importances_mean.argsort()[::-1]:
# if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
# print(f"{feature_names[i]: <8}" f"{r.importances_mean[i]: .3f}" f" +/- {r.importances_std[i]: .3f}")
plot_feature_importances(importance)
# importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
# plt.figure(figsize=(12, 10))
# plt.title("Feature importances")
# plt.xlabel("Permutation importance")
# plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
# plt.savefig("nb_heatmap_40.png")
def LogRegression(x, y, xtest, ytest):
# define the model
model = LogisticRegression()
# fit the model
model.fit(x, y)
# predict y
ypred = model.predict(xtest)
print(ypred[:10])
ypred = [1 if i > 0.6 else 0 for i in ypred]
accuracy = accuracy_score(ytest, ypred)
print(accuracy)
# heatmap_confmat(ytest, ypred, "logregression_heatmap.png")
imp = np.std(x, 0) * model.coef_[0]
# imp = model.coef_[0]
# importance = imp
importance = np.add(imp[40:], imp[:40])
feature_importance = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
print(feature_importance.sort_values('feature_importance', ascending=False).head(10))
# plt.barh([x for x in range(len(importance))], importance)
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("logreg_barplot_40.png")
# xpos = [x for x in range(len(importance))]
# plt.bar(xpos, importance)
# plt.xticks(xpos, feature_names_trimmed)
# plt.savefig("linreg.png")
# w = model.coef_[0]
# feature_importance = pd.DataFrame(feature_names, columns = ["feature"])
# feature_importance["importance"] = pow(math.e, w)
# feature_importance = feature_importance.sort_values(by = ["importance"], ascending=False)
# ax = feature_importance.plot.barh(x='feature', y='importance')
# plt.savefig("linreg.png")
print("Logistic Regression done")
def RandomForest(xtrain, ytrain, xtest, ytest):
    # Create a random forest classifier
model = RandomForestClassifier(n_estimators=100)
# Train the model using the training sets y_pred=clf.predict(X_test)
model.fit(xtrain, ytrain)
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# heatmap_confmat(ytest, ypred, "randomforest_heatmap.png")
scores = model.feature_importances_
scores = np.add(scores[40:], scores[:40])
print(sorted(zip(map(lambda x: round(x, 4), scores), feature_names),
reverse=True))
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': scores})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("rf_barplot_80_depth100.png")
# support = model.get_support()
# print(support)
# selected_feat = X_train.columns[(sel.get_support())]
# print(selected_feat)
# # PermutationImportance(model, xtest, ytest)
print("random forest done")
def DecisionTree(xtrain, ytrain, xtest, ytest, selection=False):
if selection:
feature_names = selection
model = DecisionTreeClassifier(max_depth=10)
    # Train the Decision Tree Classifier
model = model.fit(xtrain, ytrain)
# Predict the response for test dataset
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# VisualizationTree(model)
# heatmap_confmat(ytest, ypred, "decisiontree_heatmap_80.png")
# print("heatmap saved")
imp = model.feature_importances_
# Change for 40 or 80 features:
importance = np.add(imp[40:], imp[:40])
# importance = imp
feature_numbers = [36, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39]
feature_names = [feature_names_original[i] for i in feature_numbers]
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
print(importances.sort_values('feature_importance', ascending=False).head(10))
# std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
# PermutationImportance(model, xtest, ytest)
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("decisiontree.png")
print("decision tree done")
def VisualizationTree(clf):
feature_cols = [i for i in range(80)]
target_names = ['0', '1']
tree.plot_tree(clf,
feature_names=feature_cols,
class_names=target_names,
filled=True,
rounded=True)
plt.figure(figsize=(12, 12))
plt.savefig('tree_visualization.png', bbox_inches='tight', dpi=100, fontsize=18)
def NeuralNetwork(xtrain, ytrain, xtest, ytest, feed_forward=False):
print('X_train:', np.shape(xtrain))
print('y_train:', np.shape(ytrain))
print('X_test:', np.shape(xtest))
print('y_test:', np.shape(ytest))
model = Sequential()
# if feed_forward:
model.add(Dense(256, input_shape=(287399, 80), activation="sigmoid"))
model.add(Dense(128, activation="sigmoid"))
model.add(Dense(10, activation="softmax"))
model.add(Dense(1, activation='hard_sigmoid'))
model.summary()
sgd = keras.optimizers.SGD(lr=0.5, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
# early stopping callback
# This callback will stop the training when there is no improvement in
# the validation loss for 10 consecutive epochs.
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
# train_data = tf.data.Dataset.from_tensor_slices(xtrain, ytrain)
model.fit(xtrain, ytrain, epochs=30)
ypred = model.predict(xtest)
ypred = [1 if i > 0.5 else 0 for i in ypred]
loss_and_metrics = model.evaluate(xtest, ytest)
print('Loss = ', loss_and_metrics[0])
print('Accuracy = ', loss_and_metrics[1])
heatmap_confmat(ytest, ypred, "neuralnet.png")
print("neural network done")
def SVM(xtrain, ytrain, xtest, ytest):
model = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5, multi_class="crammer_singer"))
model.fit(xtrain, ytrain)
imp = model.named_steps['linearsvc'].coef_
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# heatmap_confmat(ytest, ypred, "svm.png")
    # Uncomment for 80 features
    scores = np.add(imp[0][40:], imp[0][:40])
    # Uncomment for 40 features
# scores = imp[0]
# scores = [float(i) / sum(scores) for i in scores]
sorted_index = sorted(range(len(scores)), key=lambda k: scores[k])
for i in sorted_index:
print(str(feature_names[i]) + ": " + str(scores[i]))
print("SVM done")
# features_names = ['input1', 'input2']
# f_importances(scores, features_names)
# imp = coef
# imp, names = zip(*sorted(zip(imp, names)))
# plt.barh(range(len(names)), imp, align='center')
# plt.yticks(range(len(names)), names)
# plt.savefig("barplot_svm_40.png")
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': scores})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("svm_barplot_40.png")
def Boost(xtrain, ytrain, xtest, ytest):
# data_dmatrix = xgb.DMatrix(data=xtrain, label=ytrain)
print(len(xtrain[0]))
print(len(feature_names))
x_train =
|
pd.DataFrame(data=xtrain, columns=feature_names)
|
pandas.DataFrame
|
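# Several functions above fold 80 importance values back onto 40 base features
# with np.add(imp[40:], imp[:40]); the same idea as a reusable helper
# (a sketch, assuming the second half of the vector mirrors the first half):
import numpy as np

def fold_importances(imp, n_base=40):
    """Sum each base feature's importance with its paired counterpart."""
    imp = np.asarray(imp)
    if len(imp) == n_base:       # already folded / only 40 features
        return imp
    return np.add(imp[n_base:], imp[:n_base])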
"""Twitter view"""
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil import parser as dparse
import gamestonk_terminal.config_plot as cfg_plot
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.common.behavioural_analysis import twitter_model
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_inference(ticker: str, num: int, export: str = ""):
"""Infer sentiment from past n tweets
Parameters
----------
ticker: str
Stock ticker
num: int
Number of tweets to analyze
export: str
Format to export tweet dataframe
"""
df_tweets = twitter_model.load_analyze_tweets(ticker, num)
if df_tweets.empty:
return
# Parse tweets
dt_from = dparse.parse(df_tweets["created_at"].values[-1])
dt_to = dparse.parse(df_tweets["created_at"].values[0])
console.print(f"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}")
console.print(f"To: {dt_to.strftime('%Y-%m-%d %H:%M:%S')}")
console.print(f"{len(df_tweets)} tweets were analyzed.")
dt_delta = dt_to - dt_from
n_freq = dt_delta.total_seconds() / len(df_tweets)
console.print(f"Frequency of approx 1 tweet every {round(n_freq)} seconds.")
pos = df_tweets["positive"]
neg = df_tweets["negative"]
percent_pos = len(np.where(pos > neg)[0]) / len(df_tweets)
percent_neg = len(np.where(pos < neg)[0]) / len(df_tweets)
total_sent = np.round(np.sum(df_tweets["sentiment"]), 2)
mean_sent = np.round(np.mean(df_tweets["sentiment"]), 2)
console.print(f"The summed compound sentiment of {ticker} is: {total_sent}")
console.print(f"The average compound sentiment of {ticker} is: {mean_sent}")
console.print(
f"Of the last {len(df_tweets)} tweets, {100*percent_pos:.2f} % had a higher positive sentiment"
)
console.print(
f"Of the last {len(df_tweets)} tweets, {100*percent_neg:.2f} % had a higher negative sentiment"
)
console.print("")
export_data(export, os.path.dirname(os.path.abspath(__file__)), "infer", df_tweets)
@log_start_end(log=logger)
def display_sentiment(
ticker: str,
n_tweets: int,
n_days_past: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Plot sentiments from ticker
Parameters
----------
ticker: str
Stock to get sentiment for
n_tweets: int
Number of tweets to get per hour
n_days_past: int
Number of days to extract tweets for
export: str
Format to export tweet dataframe
external_axes: Optional[List[plt.Axes]], optional
External axes to plot on, by default None
"""
# Date format string required by twitter
dt_format = "%Y-%m-%dT%H:%M:%SZ"
# Algorithm to extract
dt_recent = datetime.utcnow() - timedelta(seconds=20)
dt_old = dt_recent - timedelta(days=n_days_past)
console.print(
f"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)"
)
df_tweets = pd.DataFrame(
columns=[
"created_at",
"text",
"sentiment",
"positive",
"negative",
"neutral",
]
)
while True:
# Iterate until we haven't passed the old number of days
if dt_recent < dt_old:
break
# Update past datetime
dt_past = dt_recent - timedelta(minutes=60)
temp = twitter_model.load_analyze_tweets(
ticker,
n_tweets,
start_time=dt_past.strftime(dt_format),
end_time=dt_recent.strftime(dt_format),
)
if temp.empty:
return
df_tweets = pd.concat([df_tweets, temp])
import logging
import os
import time
import warnings
from datetime import date, datetime, timedelta
from io import StringIO
from typing import Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
import tables
from pvoutput.consts import (
BASE_URL,
CONFIG_FILENAME,
ONE_DAY,
PV_OUTPUT_DATE_FORMAT,
RATE_LIMIT_PARAMS_TO_API_HEADERS,
)
from pvoutput.daterange import DateRange, merge_date_ranges_to_years
from pvoutput.exceptions import NoStatusFound, RateLimitExceeded
from pvoutput.utils import (
_get_param_from_config_file,
_get_response,
_print_and_log,
get_date_ranges_to_download,
sort_and_de_dupe_pv_system,
system_id_to_hdf_key,
)
_LOG = logging.getLogger("pvoutput")
class PVOutput:
"""
Attributes:
api_key
system_id
rate_limit_remaining
rate_limit_total
rate_limit_reset_time
data_service_url
"""
def __init__(
self,
api_key: str = None,
system_id: str = None,
config_filename: Optional[str] = CONFIG_FILENAME,
data_service_url: Optional[str] = None,
):
"""
Args:
api_key: Your API key from PVOutput.org.
system_id: Your system ID from PVOutput.org. If you don't have a
PV system then you can register with PVOutput.org and select
the 'energy consumption only' box.
config_filename: Optional, the filename of the .yml config file.
data_service_url: Optional. If you have subscribed to
PVOutput.org's data service then add the data service URL here.
This string must end in '.org'.
"""
self.api_key = api_key
self.system_id = system_id
self.rate_limit_remaining = None
self.rate_limit_total = None
self.rate_limit_reset_time = None
self.data_service_url = data_service_url
# Set from config file if None
for param_name in ["api_key", "system_id"]:
if getattr(self, param_name) is None:
try:
param_value_from_config = _get_param_from_config_file(
param_name, config_filename
)
except Exception as e:
msg = (
"Error loading configuration parameter {param_name}"
" from config file {filename}. Either pass"
" {param_name} into PVOutput constructor, or create"
" config file {filename}. {exception}".format(
param_name=param_name, filename=CONFIG_FILENAME, exception=e
)
)
print(msg)
_LOG.exception(msg)
raise
setattr(self, param_name, param_value_from_config)
# Convert to strings
setattr(self, param_name, str(getattr(self, param_name)))
# Check for data_service_url
if self.data_service_url is None:
try:
self.data_service_url = _get_param_from_config_file(
"data_service_url", config_filename
)
except KeyError:
pass
except FileNotFoundError:
pass
if self.data_service_url is not None:
if not self.data_service_url.strip("/").endswith(".org"):
raise ValueError("data_service_url must end in '.org'")
def search(
self,
query: str,
lat: Optional[float] = None,
lon: Optional[float] = None,
include_country: bool = True,
**kwargs
) -> pd.DataFrame:
"""Search for PV systems.
Some quirks of the PVOutput.org API:
- The maximum number of results returned by PVOutput.org is 30.
If the number of returned results is 30, then there is no
indication of whether there are exactly 30 search results,
or if there are more than 30. Also, there is no way to
request additional 'pages' of search results.
- The maximum search radius is 25km
Args:
query: string, see https://pvoutput.org/help.html#search
e.g. '5km'.
lat: float, e.g. 52.0668589
lon: float, e.g. -1.3484038
include_country: bool, whether or not to include the country name
with the returned postcode.
Returns:
pd.DataFrame, one row per search results. Index is PV system ID.
Columns:
name,
system_DC_capacity_W,
address, # If `include_country` is True then address is
# 'country> <postcode>',
# else address is '<postcode>'.
orientation,
num_outputs,
last_output,
panel,
inverter,
distance_km,
latitude,
longitude
"""
api_params = {"q": query, "country": int(include_country)}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
pv_systems_text = self._api_query(service="search", api_params=api_params, **kwargs)
pv_systems = pd.read_csv(
StringIO(pv_systems_text),
names=[
"name",
"system_DC_capacity_W",
"address",
"orientation",
"num_outputs",
"last_output",
"system_id",
"panel",
"inverter",
"distance_km",
"latitude",
"longitude",
],
index_col="system_id",
)
return pv_systems
def get_status(
self, pv_system_id: int, date: Union[str, datetime], historic: bool = True, **kwargs
) -> pd.DataFrame:
"""Get PV system status (e.g. power generation) for one day.
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Args:
pv_system_id: int
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
historic: bool, if True (default), request historical data for the day.
Returns:
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
energy_efficiency_kWh_per_kW,
instantaneous_power_gen_W,
average_power_gen_W,
power_gen_normalised,
energy_consumption_Wh,
power_demand_W,
temperature_C,
voltage
"""
_LOG.info("system_id %d: Requesting system status for %s", pv_system_id, date)
date = date_to_pvoutput_str(date)
_check_date(date)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"h": int(historic == True), # We want historical data.
"limit": 288, # API limit is 288 (num of 5-min periods per day).
"ext": 0, # Extended data; we don't want extended data.
"sid1": pv_system_id, # SystemID.
}
try:
pv_system_status_text = self._api_query(
service="getstatus", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_system_status_text = ""
# See https://pvoutput.org/help.html#api-getstatus but make sure
# you read the 'History Query' subsection, as a historical query
# has slightly different return columns compared to a non-historical
# query!
columns = (
[
"cumulative_energy_gen_Wh",
"energy_efficiency_kWh_per_kW",
"instantaneous_power_gen_W",
"average_power_gen_W",
"power_gen_normalised",
"energy_consumption_Wh",
"power_demand_W",
"temperature_C",
"voltage",
]
if historic
else [
"cumulative_energy_gen_Wh",
"instantaneous_power_gen_W",
"energy_consumption_Wh",
"power_demand_W",
"power_gen_normalised",
"temperature_C",
"voltage",
]
)
pv_system_status = pd.read_csv(
StringIO(pv_system_status_text),
lineterminator=";",
names=["date", "time"] + columns,
parse_dates={"datetime": ["date", "time"]},
index_col=["datetime"],
dtype={col: np.float64 for col in columns},
).sort_index()
return pv_system_status
def get_batch_status(
self,
pv_system_id: int,
date_to: Optional[Union[str, datetime]] = None,
max_retries: Optional[int] = 1000,
**kwargs
) -> Union[None, pd.DataFrame]:
"""Get batch PV system status (e.g. power generation).
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Data returned is limited to the last 366 days per request.
To retrieve older data, use the date_to parameter.
The PVOutput getbatchstatus API is asynchronous. When it's first
called, it replies to say 'accepted'. This function will then
wait a minute and call the API again to see if the data is ready.
Set `max_retries` to 1 if you want to return immediately, even
if data isn't ready yet (and hence this function will return None)
https://pvoutput.org/help.html#dataservice-getbatchstatus
Args:
pv_system_id: int
date_to: str in format YYYYMMDD; or datetime
(localtime of the PV system). The returned timeseries will
include 366 days of data: from YYYY-1MMDD to YYYYMMDD inclusive
max_retries: int, number of times to retry after receiving
a '202 Accepted' request. Set `max_retries` to 1 if you want
to return immediately, even if data isn't ready yet (and hence
this function will return None).
Returns:
None (if data isn't ready after retrying max_retries times) or
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
instantaneous_power_gen_W,
temperature_C,
voltage
"""
api_params = {"sid1": pv_system_id}
_set_date_param(date_to, api_params, "dt")
for retry in range(max_retries):
try:
pv_system_status_text = self._api_query(
service="getbatchstatus", api_params=api_params, use_data_service=True, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date_to %s", pv_system_id, date_to)
pv_system_status_text = ""
break
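# The batch API is asynchronous: a '202 Accepted' reply means the request was only
# queued, so sleep for a minute and poll again until data arrives or max_retries is hit.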
if "Accepted 202" in pv_system_status_text:
if retry == 0:
_print_and_log("Request accepted.")
if retry < max_retries - 1:
_print_and_log("Sleeping for 1 minute.")
time.sleep(60)
else:
_print_and_log(
"Call get_batch_status again in a minute to see if" " results are ready."
)
else:
break
else:
return
return _process_batch_status(pv_system_status_text)
def get_metadata(self, pv_system_id: int, **kwargs) -> pd.Series:
"""Get metadata for a single PV system.
Args:
pv_system_id: int
Returns:
pd.Series. Index is:
name,
system_DC_capacity_W,
address,
num_panels,
panel_capacity_W_each,
panel_brand,
num_inverters,
inverter_capacity_W,
inverter_brand,
orientation,
array_tilt_degrees,
shade,
install_date,
latitude,
longitude,
status_interval_minutes,
secondary_num_panels,
secondary_panel_capacity_W_each,
secondary_orientation,
secondary_array_tilt_degrees
"""
pv_metadata_text = self._api_query(
service="getsystem",
api_params={
"array2": 1, # Provide data about secondary array, if present.
"tariffs": 0,
"teams": 0,
"est": 0,
"donations": 0,
"sid1": pv_system_id, # SystemID
"ext": 0, # Include extended data?
},
**kwargs
)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
lineterminator=";",
names=[
"name",
"system_DC_capacity_W",
"address",
"num_panels",
"panel_capacity_W_each",
"panel_brand",
"num_inverters",
"inverter_capacity_W",
"inverter_brand",
"orientation",
"array_tilt_degrees",
"shade",
"install_date",
"latitude",
"longitude",
"status_interval_minutes",
"secondary_num_panels",
"secondary_panel_capacity_W_each",
"secondary_orientation",
"secondary_array_tilt_degrees",
],
parse_dates=["install_date"],
nrows=1,
).squeeze()
pv_metadata["system_id"] = pv_system_id
pv_metadata.name = pv_system_id
return pv_metadata
def get_statistic(
self,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.DataFrame:
"""Get summary stats for a single PV system.
Args:
pv_system_id: int
date_from
date_to
Returns:
pd.DataFrame:
total_energy_gen_Wh,
energy_exported_Wh,
average_daily_energy_gen_Wh,
minimum_daily_energy_gen_Wh,
maximum_daily_energy_gen_Wh,
average_efficiency_kWh_per_kW,
num_outputs, # The number of days for which there's >= 1 val.
actual_date_from,
actual_date_to,
record_efficiency_kWh_per_kW,
record_efficiency_date,
query_date_from,
query_date_to
"""
if date_from and not date_to:
date_to = pd.Timestamp.now().date()
if date_to and not date_from:
date_from = pd.Timestamp("1900-01-01").date()
api_params = {
"c": 0, # consumption and import
"crdr": 0, # credits / debits
"sid1": pv_system_id, # SystemID
}
_set_date_param(date_from, api_params, "df")
_set_date_param(date_to, api_params, "dt")
try:
pv_metadata_text = self._api_query(
service="getstatistic", api_params=api_params, **kwargs
)
except NoStatusFound:
pv_metadata_text = ""
columns = [
"total_energy_gen_Wh",
"energy_exported_Wh",
"average_daily_energy_gen_Wh",
"minimum_daily_energy_gen_Wh",
"maximum_daily_energy_gen_Wh",
"average_efficiency_kWh_per_kW",
"num_outputs",
"actual_date_from",
"actual_date_to",
"record_efficiency_kWh_per_kW",
"record_efficiency_date",
]
date_cols = ["actual_date_from", "actual_date_to", "record_efficiency_date"]
numeric_cols = set(columns) - set(date_cols)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
names=columns,
dtype={col: np.float32 for col in numeric_cols},
parse_dates=date_cols,
)
if pv_metadata.empty:
data = {col: np.float32(np.NaN) for col in numeric_cols}
data.update({col: pd.NaT for col in date_cols})
pv_metadata = pd.DataFrame(data, index=[pv_system_id])
else:
pv_metadata.index = [pv_system_id]
pv_metadata["query_date_from"] = pd.Timestamp(date_from) if date_from else pd.NaT
pv_metadata["query_date_to"] = pd.Timestamp(date_to) if date_to else pd.Timestamp.now()
return pv_metadata
def _get_statistic_with_cache(
self,
store_filename: str,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.Series:
"""Will try to get stats from store_filename['statistics']. If this
fails, or if date_to > query_date_to, or if
date_from < query_date_from, then will call the API. Note that the aim
of this function is just to find the relevant actual_date_from and
actual_date_to, so this function does not respect the other params.
"""
if date_from:
date_from = pd.Timestamp(date_from).date()
if date_to:
date_to = pd.Timestamp(date_to).date()
def _get_fresh_statistic():
_LOG.info("pv_system %d: Getting fresh statistic.", pv_system_id)
stats = self.get_statistic(pv_system_id, **kwargs)
with pd.HDFStore(store_filename, mode="a") as store:
try:
store.remove(key="statistics", where="index=pv_system_id")
except KeyError:
pass
store.append(key="statistics", value=stats)
return stats
try:
stats = pd.read_hdf(store_filename, key="statistics", where="index=pv_system_id")
import math
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from dash_table.Format import Format, Scheme
from Bio import Phylo
from ete3 import Tree
from plotly.subplots import make_subplots
# -------------------------------------------------------------------------------------
# --------------------------------------- Classes -------------------------------------
class DrawTree():
def __init__(self, newicktree, template, topology, color_map, branch_len, font_family):
self.newicktree = Phylo.read(newicktree, "newick")
self.template = template
self.topology = topology
self.color_map = color_map
self.branch_len = branch_len
self.font_family = font_family
def create_square_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
if self.branch_len:
xcoords = tree.depths(unit_branch_lengths=True)
else:
xcoords = tree.depths()
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1.3):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
ycoords = dict(
(leaf, maxheight - i * dist)
for i, leaf in enumerate(reversed(tree.get_terminals()))
)
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
# This is intermediate placement of internal nodes
ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="white",
line_width=2,
root_clade = False
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if root_clade:
branch_line.update(x0=-0.01, y0=y_curr, x1=-0.01, y1=y_curr)
return branch_line
elif orientation == "horizontal":
branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="white",
line_width=2,
x_coords=0,
y_coords=0,
init_clade=False,
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
if init_clade:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=True,
)
else:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=False,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_coords[clade.clades[0]]
y_bot = y_coords[clade.clades[-1]]
line_shapes.append(
get_clade_lines(
orientation="vertical",
x_curr=x_curr,
y_bot=y_bot,
y_top=y_top,
line_color=line_color,
line_width=line_width,
)
)
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes,
x_coords=x_coords, y_coords=y_coords,
line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
init_clade=True,
)
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
# Internal nodes (no name) get a blank label; named leaves show their name
if not cl.name:
text.append(" ")
else:
text.append(cl.name)
axis = dict(
showline=False,
visible=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title="", # y title
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
textposition='middle right',
textfont=dict(color=text_color, size=12),
showlegend=False,
name=elt,
)
nodes.append(node)
# Set graph x-range
if self.branch_len:
x_range = [-0.5, (max(x_coords.values())+2)]
show_xaxis = False
elif max(x_coords.values()) < 0.1:
x_range = [0, (max(x_coords.values())+(max(x_coords.values())*1.25))]
show_xaxis = True
elif max(x_coords.values()) < 0.5:
x_range = [0, 0.5]
show_xaxis = True
elif max(x_coords.values()) < 1:
x_range = [0, 1]
show_xaxis = True
elif max(x_coords.values()) == 1:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
else:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
layout = dict(
autosize=True,
showlegend=False,
template=self.template,
dragmode="pan",
margin=dict(t=20, b=10, r=20, l=10),
xaxis=dict(
showline=True,
zeroline=False,
visible=show_xaxis,
showgrid=False,
showticklabels=True,
range=x_range,
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
font=dict(family=self.font_family,size=14),
)
fig = go.Figure(data=nodes, layout=layout)
return fig
def create_angular_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
# xcoords = tree.depths(unit_branch_lengths=True)
# print("===========================")
# nodes = [n for n in tree.find_clades()]
# nodes = tree.get_terminals() + tree.get_nonterminals()
# print(tree.root.clades)
# root_xcoord = {tree.root.clades[1]:0}
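# Leaf nodes get consecutive integer x positions; internal nodes get half-integer (i + 0.5) positions.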
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_xcoords = dict((leaf, i) for i, leaf in enumerate(terminal_nodes))
internal_xcoords = dict(
(leaf, i+0.5) for leaf, i in zip(internal_nodes, range(1, len(internal_nodes)))
)
xcoords = {**terminal_xcoords, **internal_xcoords}
# print(xcoords)
# print("===========================")
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth
# is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
# root_ycoord = {tree.root:maxheight}
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_ycoords = dict((leaf, 1) for _, leaf in enumerate(terminal_nodes))
internal_ycoords = dict(
(leaf, i) for leaf, i in zip(internal_nodes, reversed(range(1, len(internal_nodes))))
)
ycoords = {**terminal_ycoords, **internal_ycoords}
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
ycoords[clade] = (ycoords[clade.clades[0]] +
ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
last_y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="rgb(25,25,25)",
line_width=0.5,
init_flag=False,
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if orientation == "horizontal":
if init_flag:
branch_line.update(x0=x_start, y0=y_curr,
x1=x_curr, y1=y_curr)
else:
branch_line.update(
x0=x_start, y0=last_y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="rgb(15,15,15)",
line_width=1,
x_coords=0,
y_coords=0,
last_clade_y_coord=0,
init_flag=True
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
last_y_curr=last_clade_y_coord,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
init_flag=init_flag,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes, x_coords=x_coords,
y_coords=y_coords, last_clade_y_coord=y_coords[clade],
init_flag=False, line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
# Load in Tree object and ladderize
tree = self.newicktree
tree.ladderize()
# Get coordinates + put into dictionary
# dict(keys=clade_names, values=)
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
)
#
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
# Add confidence values if internal node
if not cl.name:
text.append(cl.confidence)
else:
text.append(cl.name)
axis = dict(
showline=False,
zeroline=False,
showgrid=False,
visible=False,
showticklabels=False,
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
textposition='middle right',
textfont=dict(color=text_color, size=25),
showlegend=False,
name=elt,
)
nodes.append(node)
layout = dict(
template=self.template,
dragmode="select",
autosize=True,
showlegend=True,
xaxis=dict(
showline=True,
zeroline=False,
visible=False,
showgrid=False,
showticklabels=True,
range=[0, (max(x_coords.values())+2)]
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
legend={"x": 0, "y": 1},
font=dict(family="Open Sans"),
)
fig = dict(data=nodes, layout=layout)
return fig
def create_circular_tree(self):
def get_circular_tree_data(tree, order='level', dist=1, start_angle=0, end_angle=360, start_leaf='first'):
"""Define data needed to get the Plotly plot of a circular tree
Source code found at: https://chart-studio.plotly.com/~empet/14834.embed
"""
# tree: an instance of Bio.Phylo.Newick.Tree or Bio.Phylo.PhyloXML.Phylogeny
# order: tree traversal method to associate polar coordinates to its nodes
# dist: the vertical distance between two consecutive leafs in the associated rectangular tree layout
# start_angle: angle in degrees representing the angle of the first leaf mapped to a circle
# end_angle: angle in degrees representing the angle of the last leaf
# the list of leafs mapped in anticlockwise direction onto circles can be tree.get_terminals()
# or its reversed version tree.get_terminals()[::-1].
# start leaf: is a keyword with two possible values"
# 'first': to map the leafs in the list tree.get_terminals() onto a circle,
# in the counter-clockwise direction
# 'last': to map the leafs in the list, tree.get_terminals()[::-1]
start_angle *= np.pi/180 # conversion to radians
end_angle *= np.pi/180
def get_radius(tree):
"""
Associates to each clade root its radius, equal to the distance from that clade to the tree root
returns dict {clade: node_radius}
"""
if self.branch_len:
node_radius = tree.depths(unit_branch_lengths=True)
else:
node_radius = tree.depths()
# If the tree did not record the branch lengths assign the unit branch length
# (ex: the case of a newick tree "(A, (B, C), (D, E))")
if not np.count_nonzero(list(node_radius.values())):
node_radius = tree.depths(unit_branch_lengths=True)
return node_radius
def get_vertical_position(tree):
"""
returns a dict {clade: ycoord}, where y-coord is the cartesian y-coordinate
of a clade root in a rectangular phylogram
"""
n_leafs = tree.count_terminals() # Counts the number of tree leafs.
# Assign y-coordinates to the tree leafs
if start_leaf == 'first':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(tree.get_terminals()))
elif start_leaf == 'last':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(reversed(tree.get_terminals())))
else:
raise ValueError("start leaf can be only 'first' or 'last'")
def assign_ycoord(clade):#compute the y-coord for the root of this clade
for subclade in clade:
if subclade not in node_ycoord: # if the subclade root hasn't a y-coord yet
assign_ycoord(subclade)
node_ycoord[clade] = 0.5 * (node_ycoord[clade.clades[0]] + node_ycoord[clade.clades[-1]])
if tree.root.clades:
assign_ycoord(tree.root)
return node_ycoord
node_radius = get_radius(tree)
node_ycoord = get_vertical_position(tree)
y_vals = node_ycoord.values()
ymin, ymax = min(y_vals), max(y_vals)
ymin -= dist # this dist subtraction is necessary to avoid coincidence of the first and last leaf angle
# when the interval [ymin, ymax] is mapped onto [0, 2pi],
def ycoord2theta(y):
# maps an y in the interval [ymin-dist, ymax] to the interval [radian(start_angle), radian(end_angle)]
return start_angle + (end_angle - start_angle) * (y-ymin) / float(ymax-ymin)
def get_points_on_lines(linetype='radial', x_left=0, x_right=0, y_right=0, y_bot=0, y_top=0):
"""
- define the points that generate a radial branch and the circular arcs, perpendicular to that branch
- a circular arc (angular linetype) is defined by 10 points on the segment of ends
(x_bot, y_bot), (x_top, y_top) in the rectangular layout,
mapped by the polar transformation into 10 points that are spline interpolated
- returns for each linetype the lists X, Y, containing the x-coords, resp y-coords of the
line representative points
"""
if linetype == 'radial':
theta = ycoord2theta(y_right)
X = [x_left*np.cos(theta), x_right*np.cos(theta), None]
Y = [x_left*np.sin(theta), x_right*np.sin(theta), None]
elif linetype == 'angular':
theta_b = ycoord2theta(y_bot)
theta_t = ycoord2theta(y_top)
t = np.linspace(0,1, 10)# 10 points that span the circular arc
theta = (1-t) * theta_b + t * theta_t
X = list(x_right * np.cos(theta)) + [None]
Y = list(x_right * np.sin(theta)) + [None]
else:
raise ValueError("linetype can be only 'radial' or 'angular'")
return X,Y
def get_line_lists(clade, x_left, xlines, ylines, xarc, yarc):
"""Recursively compute the lists of points that span the tree branches"""
# xlines, ylines - the lists of x-coords, resp y-coords of radial edge ends
# xarc, yarc - the lists of points generating arc segments for tree branches
x_right = node_radius[clade]
y_right = node_ycoord[clade]
X,Y = get_points_on_lines(linetype='radial', x_left=x_left, x_right=x_right, y_right=y_right)
xlines.extend(X)
ylines.extend(Y)
if clade.clades:
y_top = node_ycoord[clade.clades[0]]
y_bot = node_ycoord[clade.clades[-1]]
X,Y = get_points_on_lines(linetype='angular', x_right=x_right, y_bot=y_bot, y_top=y_top)
xarc.extend(X)
yarc.extend(Y)
# get and append the lists of points representing the branches of the descedants
for child in clade:
get_line_lists(child, x_right, xlines, ylines, xarc, yarc)
xlines = []
ylines = []
xarc = []
yarc = []
get_line_lists(tree.root, 0, xlines, ylines, xarc, yarc)
xnodes = []
ynodes = []
for clade in tree.find_clades(order='preorder'): #it was 'level'
theta = ycoord2theta(node_ycoord[clade])
xnodes.append(node_radius[clade]*np.cos(theta))
ynodes.append(node_radius[clade]*np.sin(theta))
return xnodes, ynodes, xlines, ylines, xarc, yarc
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
traverse_order = 'preorder'
all_clades=list(tree.find_clades(order=traverse_order))
for k in range(len((all_clades))):
all_clades[k].id=k
xnodes, ynodes, xlines, ylines, xarc, yarc = get_circular_tree_data(tree, order=traverse_order, start_leaf='last')
tooltip=[]
clade_names=[]
color=[]
for clade in tree.find_clades(order=traverse_order):
if self.branch_len:
branch_length = 1
else:
branch_length = clade.branch_length
if clade.name and clade.confidence and clade.branch_length:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
color.append(clade.confidence)
clade_names.append(clade.name)
elif clade.name is None and clade.branch_length is not None and clade.confidence is not None:
color.append(clade.confidence)
clade_names.append(clade.name)
tooltip.append(f"branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
elif clade.name and clade.branch_length and clade.confidence is None:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}")
color.append(-1)
clade_names.append(clade.name)
else:
tooltip.append('')
color.append(-1)
clade_names.append(clade.name)
trace_nodes=dict(type='scatter',
x=xnodes,
y= ynodes,
mode='markers+text',
marker=dict(color=text_color, size=8),
text=clade_names,
textposition='top center',
textfont=dict(color=text_color, size=12),
hoverinfo='text',
hovertemplate=tooltip,
)
trace_radial_lines=dict(type='scatter',
x=xlines,
y=ylines,
mode='lines',
line=dict(color=line_color, width=1),
hoverinfo='none',
)
trace_arcs=dict(type='scatter',
x=xarc,
y=yarc,
mode='lines',
line=dict(color=line_color, width=1, shape='spline'),
hoverinfo='none',
)
layout=dict(
font=dict(family=self.font_family,size=14),
autosize=True,
showlegend=False,
template=self.template,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode='closest',
margin=dict(t=20, b=10, r=20, l=10, pad=20),
)
fig = go.Figure(data=[trace_radial_lines, trace_arcs, trace_nodes], layout=layout)
return fig
class RFDistance():
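"""Compute Robinson-Foulds distances between two newick trees using ete3's Tree.compare()."""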
def __init__(self, t1, t2):
self.t1 = Tree(t1)
self.t2 = Tree(t2)
self.compare = self.t1.compare(self.t2)
def NormRF(self):
return self.compare['norm_rf']
def RF(self):
return self.compare['rf']
def MaxRF(self):
return self.compare['max_rf']
# -------------------------------------------------------------------------------------
# ------------------------------ Alt Data Graph Functions -----------------------------
def make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df.sort_values(by=["Window"], inplace=True)
topology_df.fillna("NULL", inplace=True)
# Build graph
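# Whole-genome view facets one histogram row per chromosome; otherwise a single panel is drawn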
if whole_genome:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
category_orders={"Chromosome": chromosome_df['Chromosome']},
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique())
)
else:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
fig.update_yaxes(
title="y-axis",
range=[0, 1],
nticks=1,
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
return fig
def make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df = topology_df.sort_values(by=["Window"])
y_range = [0, (y_max*1.1)]
# Build graph
if whole_genome:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
category_orders={"Chromosome": chromosome_df['Chromosome']},
color_discrete_sequence=list(color_mapping.values()),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique()),
)
else:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
# Update X-axis
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
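# Use scientific notation on the y-axis when the plotted values are very small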
if y_max < 0.1:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
title="Edit me",
showexponent = 'all',
exponentformat = 'e',
)
else:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
title="Edit me",
)
return fig
# ----------------------------------------------------------------------------------------
# -------------------------- Single Chromosome Graph Functions ---------------------------
def build_histogram_with_rug_plot(
topology_df,
chromosome,
chromosome_df,
template,
current_topologies,
window_size,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Set up topology data ---
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
# Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
# Group data by topology ID
grouped_topology_df = wanted_rows.sort_values(['TopologyID'],ascending=False).groupby(by='TopologyID')
# Set row heights based on number of current_topologies being shown
if len(current_topologies) <= 6:
subplot_row_heights = [1, 1]
elif len(current_topologies) <= 8:
subplot_row_heights = [4, 2]
else:
subplot_row_heights = [8, 2]
# Build figure
# fig = make_subplots(rows=2, cols=1, row_heights=subplot_row_heights, vertical_spacing=0.05, shared_xaxes=True)
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.05, shared_xaxes=True)
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
),
# go.Box(
# x=data['Window'],
# y=data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=1, col=1,
)
fig.add_trace(
go.Bar(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
showlegend=False,
marker_color=color_mapping[topology],
marker_line_width=0,
),
row=2, col=1
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='constant'
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
row=1,
col=1
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
title='Position',
showgrid=xaxis_gridlines,
row=2,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
categoryarray=topoOrder,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
fixedrange=True,
ticklen=0,
title="",
type='category',
row=1,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
fixedrange=True,
linewidth=axis_line_width,
nticks=1,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
row=2,
col=1,
)
return fig
def build_rug_plot(
topology_df,
chromosome,
template,
current_topologies,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Group wanted data ---
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
# Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
else:
pass
# --- Group data by topology ID
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# --- Build figure ---
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=int(100/len(grouped_topology_df)),
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
))
# Update figure layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
xaxis_title_text='Position',
margin=dict(
l=60,
r=60,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
showline=True,
)
fig.update_yaxes(
fixedrange=True,
title="",
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
showticklabels=False,
type='category',
categoryarray=topoOrder,
)
fig.for_each_annotation(lambda a: a.update(text=""))
return fig
def build_tile_plot(
topology_df_filtered,
chromosome_df,
template,
current_topologies,
color_mapping,
dataRange,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"].isin(current_topologies)]
# fig = px.histogram(
# wanted_rows,
# x="Window",
# y=[1]*len(wanted_rows),
# color="TopologyID",
# color_discrete_map=color_mapping,
# nbins=int(chromosome_df["End"].max()/window_size)
# )
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# Build figure
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=225,
# marker_line_width=2,
marker_color=[color_mapping[topology]]*len(data),
# showlegend = False
),
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
linewidth=axis_line_width,
rangemode="tozero",
range=dataRange,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
# range=[0, 1],
showline=False,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
)
return fig
def build_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
return alt_data_graph_data
def build_whole_genome_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
None,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
None,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
return alt_data_graph_data
def build_gff_figure(
data,
dataRange,
template,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
regionStart, regionEnd = dataRange
# Show gene names when viewing 10Mb of data or less
if abs(regionEnd - regionStart) <= 10000000:
show_gene_names = True
else:
show_gene_names = False
# Separate
# group data by feature and gene name
attr_group = data.groupby(by=['feature', 'attribute', 'strand'])
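# Gene-name labels alternate between top and bottom placement so adjacent labels are less likely to overlap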
positive_text_pos = "top center"
negative_text_pos = "top center"
features_graphed = list()
fig = go.Figure()
y_idx = 1
curr_feature = dict()
for fg, gene_data in attr_group:
feature, gene, strand = fg
feature_strand = f"{feature} ({strand})"
x_values = sorted(gene_data['start'].to_list() + gene_data['end'].to_list())
# Update y-axis value if new feature
if not curr_feature:
curr_feature[feature_strand] = y_idx
y_idx += 1
elif feature_strand in curr_feature.keys():
pass
else:
curr_feature[feature_strand] = y_idx
y_idx += 1
# Set legend show if feature in list already
if feature_strand in features_graphed:
show_legend = False
else:
show_legend = True
features_graphed.append(feature_strand)
# Set color, y-values, and arrow direction
if strand == '+':
colorValue = 'red'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['square']*(len(x_values)-1) + ['triangle-right']
text_pos = positive_text_pos
text_val = [gene] + ['']*(len(x_values)-1)
if positive_text_pos == "top center":
positive_text_pos = "bottom center"
elif positive_text_pos == "bottom center":
positive_text_pos = "top center"
else:
colorValue = '#009BFF'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['triangle-left'] + ['square']*(len(x_values)-1)
text_pos = negative_text_pos
text_val = ['']*(len(x_values)-1) + [gene]
if negative_text_pos == "top center":
negative_text_pos = "bottom center"
elif negative_text_pos == "bottom center":
negative_text_pos = "top center"
if show_gene_names:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines+text',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
text=text_val,
textposition=text_pos,
textfont=dict(
size=10,
),
hovertemplate=None,
showlegend=show_legend,
))
else:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
# hoverinfo=['all'],
hovertemplate=None,
showlegend=show_legend,
))
fig.update_layout(
hovermode="x unified",
showlegend=True,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
template=template,
title='',
margin=dict(
l=62,
r=50,
b=20,
t=20,
),
height=150*len(features_graphed),
font=dict(family=font_family,),
)
fig.update_xaxes(
range=dataRange,
title='Position',
matches="x",
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, len(features_graphed)+1],
fixedrange=True,
showticklabels=False,
showgrid=yaxis_gridlines,
title='',
linewidth=axis_line_width,
)
return fig
# ----------------------------------------------------------------------------------------
# ------------------------------- Quantile Graph Functions -------------------------------
def get_quantile_coordinates(
chromLengths,
QUANTILES,
WINDOWSIZE,
):
quantileCoordinates = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
for row in chromLengths.itertuples(index=False):
chrom, _, end = row
chunkSize = end // QUANTILES
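# Split each chromosome into QUANTILES equal-length chunks; chunks after the first start
# one window past the previous chunk's end so boundary windows aren't double counted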
for i in range(QUANTILES):
q = i + 1
if q == 1:
quantileCoordinates.at[q, chrom] = [0, chunkSize]
else:
quantileCoordinates.at[q, chrom] = [chunkSize*(q-1) + WINDOWSIZE, chunkSize*q]
return quantileCoordinates
def calculateFrequencies(
quantileCoordinates,
input_df,
chromLengths,
QUANTILES,
):
quantileFrequencies = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
topos = input_df["TopologyID"].unique()
for chrom in quantileCoordinates.columns:
for q, quantile in enumerate(quantileCoordinates[chrom], 1):
quantileData = input_df[(input_df['Window'] >= quantile[0]) & (input_df['Window'] <= quantile[1]) & (input_df['Chromosome'] == chrom)]
topoQD = quantileData['TopologyID'].value_counts().to_dict()
# Add missing topologies as count=0
for i in topos:
if i not in topoQD.keys():
topoQD[i] = 0
quantileFrequencies.at[q, chrom] = topoQD
continue
return quantileFrequencies
def plot_frequencies(
quantileFrequencies,
n_quantiles,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
):
def reorganizeDF(df):
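"""Flatten the quantile-by-chromosome grid of {TopologyID: count} dicts into a long-format DataFrame of per-chromosome topology frequencies."""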
new_df = pd.DataFrame(columns=['Chr', 'Quantile', 'TopologyID', 'Frequency'])
nidx = 0
for c in df.columns:
for idx in df.index:
chromTotal = sum([v for v in df.at[idx, c].values()])
for topo, freq in zip(df.at[idx, c].keys(), df.at[idx, c].values()):
new_df.at[nidx, 'TopologyID'] = topo
new_df.at[nidx, 'Chr'] = c
new_df.at[nidx, 'Quantile'] = idx
try:
new_df.at[nidx, 'Frequency'] = int(freq)/chromTotal
except ZeroDivisionError:
new_df.at[nidx, 'Frequency'] = 0.0
nidx += 1
return new_df
# Organize DataFrame
organizedDF= reorganizeDF(quantileFrequencies)
# Create line graph
fig = px.line(
organizedDF,
x='Quantile',
y='Frequency',
color='TopologyID',
facet_col='Chr',
facet_col_wrap=1,
facet_row_spacing=0.01,
color_discrete_map=color_mapping,
)
fig.update_traces(texttemplate='%{text:.3}', textposition='top center')
if len(organizedDF["Chr"].unique()) == 1:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=300,
)
else:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=100*len(organizedDF["Chr"].unique()),
)
fig.update_xaxes(
range=[1, n_quantiles],
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1],
fixedrange=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return fig
def calculate_topo_quantile_frequencies(df, current_topologies, additional_data, n_quantiles):
final_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
for topology in current_topologies:
topo_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
tidx = 0
df = df.sort_values(by=additional_data)
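# rank(method='first') breaks ties so pd.qcut can always form n_quantiles equal-sized bins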
df = df.assign(Quantile = pd.qcut(df[additional_data].rank(method='first'), q=n_quantiles, labels=False))
df['Quantile'] = df['Quantile'].apply(lambda x: x+1)
df_group = df.groupby(by="Quantile")
for rank, data in df_group:
counts = data["TopologyID"].value_counts()
for t, f in zip(counts.index, counts):
if t == topology:
topo_df.at[tidx, "TopologyID"] = t
topo_df.at[tidx, "Frequency"] = f/len(df)
topo_df.at[tidx, "Quantile"] = rank
tidx += 1
break
else:
continue
# -- Concat dfs --
final_df = pd.concat([final_df, topo_df])
return final_df
def plot_frequencies_topo_quantile(
final_df,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
graph_title,
additional_data
):
fig = px.line(
final_df,
x="Quantile", y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
markers=True,
)
fig.update_layout(
template=template,
title=graph_title,
title_x=0.5,
margin=dict(
t=80
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
# itemsizing='constant'
),
)
fig.update_xaxes(
title=f"{additional_data} Quantiles",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
tick0=0,
dtick=1,
)
fig.update_yaxes(
rangemode="tozero",
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
title='% Windows Observed',
)
return fig
# ---------------------------------------------------------------------------------
# -------------------------------- Whole Genome Graph Functions -------------------------------
def build_topology_frequency_pie_chart(
df,
template,
color_mapping,
font_family,
):
"""Returns pie graph for whole genome topology frequencies"""
fig = px.pie(
df,
values='Frequency',
names='TopologyID',
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title='Whole Genome Topology Frequencies',
)
fig.update_traces(textposition='inside')
fig.update_layout(
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
legend=dict(itemclick=False, itemdoubleclick=False),
title_x=0.5,
font=dict(family=font_family,),
)
return fig
def build_rf_graph(
df,
ref_topo,
template,
color_mapping,
axis_line_width,
font_family,
):
fig = px.bar(
df, x="TopologyID", y="normRF-Distance",
color="TopologyID", color_discrete_map=color_mapping,
text='normRF-Distance')
fig.update_traces(texttemplate='%{text:.2f}', textposition='inside')
fig.update_layout(
title=f"Normalized RF-Distance from {ref_topo}",
title_x=0.5,
template=template,
font=dict(family=font_family,),
)
fig.update_xaxes(linewidth=axis_line_width)
fig.update_yaxes(linewidth=axis_line_width, range=[0, 1])
return fig
def build_whole_genome_rug_plot(
df,
chrom_df,
chromGroup,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
df = df[(df['TopologyID'].isin(currTopologies)) & (df['Chromosome'].isin(chromGroup))]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
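# Map each chromosome to its own subplot row (rows ordered by sorted chromosome name)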
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
row_height = [2]*num_chroms
# --- Build figure ---
# If chromosome names are longer than 5 characters, use subplot titles
# instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
subplot_titles=chrom_row_dict.keys(),
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
else:
fig = make_subplots(
rows=num_chroms,
row_titles=[c for c in chrom_row_dict.keys()],
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if len(chrom_data) == 0:
fig.add_trace(
go.Scatter(
x=[0],
y=[topology],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
row=chrom_row_dict[chrom], col=1,
)
elif add_legend:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
add_legend = False
else:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# legendgroup=topology,
# showlegend = False,
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
# Update layout + axes
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(
rangemode="tozero",
range=[0, (chrom_df['End'].max()+(2*window_size))],
fixedrange=True,
linewidth=axis_line_width,
ticklen=0,
matches="x",
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
title="",
showgrid=yaxis_gridlines,
showticklabels=False,
linewidth=axis_line_width,
categoryarray=topoOrder,
)
    # Pick a per-row pixel height from the squish/expand setting and chromosome count,
    # then apply one shared layout
    if wg_squish_expand == 'expand':
        row_px = 160 if num_chroms < 5 else 100
    elif wg_squish_expand == 'squish':
        row_px = 125 if num_chroms < 5 else 50
    else:
        row_px = 105 if num_chroms < 5 else 20
    layout_kwargs = dict(
        template=template,
        legend_title_text='Topology',
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            traceorder='normal',
            itemsizing='constant',
        ),
        height=row_px*num_chroms,
        shapes=chrom_shapes,
        title_x=0.5,
        font=dict(family=font_family,),
    )
    # The most compact view also trims the top/bottom margins
    if wg_squish_expand not in ('expand', 'squish') and num_chroms >= 5:
        layout_kwargs['margin'] = dict(t=10, b=30)
    fig.update_layout(**layout_kwargs)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_tile_plot(
df,
chrom_df,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
"""
    Build a per-chromosome histogram (tile) view of topology frequencies.
    Max chromosomes per graph if # current_topologies <= 3: 20
    Max chromosomes per graph if # current_topologies > 3: 10
    Returns: a single plotly figure
"""
df = df[df['TopologyID'].isin(currTopologies)]
df = df[df['Chromosome'].isin(chromGroup)]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
# --- Build figure ---
    # If the longest chromosome name is longer than 5 characters,
    # use subplot titles instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
subplot_titles=chrom_row_dict.keys(),
vertical_spacing=0.03,
)
else:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
row_titles=[c for c in chrom_row_dict.keys()],
vertical_spacing=0.001,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if add_legend:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
add_legend = False
else:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
showlegend = False
),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
# Update layout + axes
    # Per-row pixel height depends on the squish/expand setting and chromosome count
    if wg_squish_expand == 'expand':
        row_px = 130 if num_chroms < 5 else 100
    elif wg_squish_expand == 'squish':
        row_px = 80 if num_chroms < 5 else 50
    else:
        row_px = 55 if num_chroms < 5 else 20
    fig.update_layout(
        barmode="relative",
        template=template,
        legend_title_text='Topology',
        margin=dict(
            l=60,
            r=50,
            b=40,
            t=40,
        ),
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            traceorder='normal',
            itemsizing='constant',
        ),
        hovermode="x unified",
        height=row_px*num_chroms,
        shapes=chrom_shapes,
        title_x=0.5,
        font=dict(family=font_family,),
    )
fig.update_xaxes(
linewidth=axis_line_width,
fixedrange=True,
rangemode="tozero",
range=[0, chrom_df['End'].max()],
ticklen=0,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
# categoryarray=topoOrder,
range=[0, 1],
fixedrange=True,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
title="",
ticklen=0,
)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_bar_plot(
df,
template,
color_mapping,
currTopologies,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
df = df[df['TopologyID'].isin(currTopologies)]
number_of_chrom_rows = len(df["Chromosome"].unique()) // 3
fig = px.bar(
df,
x='TopologyID',
y='Frequency',
facet_col='Chromosome',
facet_col_wrap=3,
facet_row_spacing=0.05,
color='TopologyID',
template=template,
color_discrete_map=color_mapping,
text='Frequency',
height=int(500*number_of_chrom_rows),
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_traces(texttemplate='%{text:.2}', textposition='outside')
# Remove y-axis labels
for axis in fig.layout:
if type(fig.layout[axis]) == go.layout.YAxis:
fig.layout[axis].title.text = ''
fig.update_layout(
uniformtext_minsize=12,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
margin=dict(l=10, r=10, t=10, b=10),
title="",
annotations = list(fig.layout.annotations) +
[go.layout.Annotation(
x=-0.07,
y=0.5,
font=dict(
size=12,
# color='white',
),
showarrow=False,
text="Frequency",
textangle=-90,
xref="paper",
yref="paper"
)
],
title_x=0.5,
font=dict(family=font_family,),
)
fig.update_xaxes(
title="",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1.1],
matches='y',
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
)
return fig
def build_whole_genome_pie_charts(
df,
template,
color_mapping,
chromGroup,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
number_of_chrom_rows = (len(df["Chromosome"].unique()) // 3)+(math.ceil(len(df["Chromosome"].unique()) % 3))
specs = [[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}] for _ in range(number_of_chrom_rows)]
fig = make_subplots(
rows=number_of_chrom_rows,
cols=3,
specs=specs,
vertical_spacing=0.03,
horizontal_spacing=0.001,
subplot_titles=sorted(df["Chromosome"].unique()),
column_widths=[2]*3,
)
col_pos = 1
row_num = 1
for c in sorted(df['Chromosome'].unique()):
chrom_df = df[df["Chromosome"] == c]
fig.add_trace(go.Pie(labels=chrom_df["TopologyID"], values=chrom_df['Frequency'], marker_colors=list(color_mapping.values())), row=row_num, col=col_pos)
if col_pos == 3:
col_pos = 1
row_num += 1
else:
col_pos += 1
fig.update_traces(textposition='inside')
fig.update_layout(
uniformtext_minsize=12,
showlegend=True,
template=template,
height=int(200*number_of_chrom_rows),
font=dict(family=font_family,),
)
return fig
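# Illustrative sketch only (not used above): build_whole_genome_pie_charts() walks a
# 3-wide grid with manual row/col counters; the same 1-based positions can be generated
# with integer division, which is convenient for checking the layout by hand.
def _example_grid_positions(n_items, n_cols=3):
    # item i goes to plotly row i//n_cols + 1, column i%n_cols + 1
    return [(i // n_cols + 1, i % n_cols + 1) for i in range(n_items)]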
# ---------------------------------------------------------------------------------
# --------------------------- Stats DataFrame Generators --------------------------
def _get_valid_cols(topology_df):
valid_cols = list()
for i in topology_df.columns[4:]:
data = topology_df[i].unique()
flag = None
for j in data:
if type(j) == str:
flag = False
break
else:
flag = True
if flag:
valid_cols.append(i)
else:
continue
return valid_cols
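# Illustrative sketch only (the app keeps using _get_valid_cols above): a pandas
# dtype-based way to pull the numeric "additional data" columns. It is close to, but not
# guaranteed identical to, the string-scanning check above for oddly typed columns.
def _example_valid_cols_via_dtypes(topology_df):
    extra = topology_df.iloc[:, 4:]
    return list(extra.select_dtypes(include="number").columns)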
def basic_stats_dfs(topology_df):
"""Generate dataframes of basic statistics
:param topology_df: Current View Tree Viewer input file dataframe
:type topology_df: Object
"""
# Calculate current view topologies
topo_freq_df = pd.DataFrame(topology_df["TopologyID"].value_counts()/len(topology_df))
if len(topo_freq_df) > 25: # If more than 25 topologies loaded, just show top 25
topo_freq_df = topo_freq_df.head(25)
remainder_freq = 1.0 - sum(topo_freq_df['TopologyID'])
topo_freq_df.at["Other", "TopologyID"] = remainder_freq
topo_names = [i for i in topo_freq_df.index]
topo_freqs = [round(i, 4) for i in topo_freq_df["TopologyID"]]
    # Calculate mean + standard deviation of additional data
if len(topology_df.columns) > 4:
valid_cols = _get_valid_cols(topology_df)
additional_dt_names = [i for i in valid_cols]
additional_dt_avg = [topology_df[i].mean() for i in valid_cols]
additional_dt_std = [topology_df[i].std() for i in valid_cols]
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
additional_data_df = pd.DataFrame(
{
"Additional Data": additional_dt_names,
"Average": additional_dt_avg,
"Std Dev": additional_dt_std,
}
)
return topo_freq_df, additional_data_df
else: # No additional data types present in file
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
return topo_freq_df,
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
    """Summarise one scenario: discounted programme cost, DALYs averted, cost changes,
    ICER and budget impact, weighting male/female model runs by population share."""
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time_horizon for i in range(1)])
int_cost = numpy.sum(numpy.dot(int_c, int_dwc))
female_pop = 188340000
male_pop = 196604000
pop = female_pop + male_pop
f_prop = female_pop / pop
m_prop = male_pop / pop
samples = ctrl_m.shape[0]
cs = 0
nq = 0
ic = [0.00 for i in range(samples)]
q_gained = [0.00 for i in range(samples)]
q_inc_percent = [0.00 for i in range(samples)]
htn_cost = [0.00 for i in range(samples)]
cvd_cost = [0.00 for i in range(samples)]
net_cost = [0.00 for i in range(samples)]
exp_inc_per = [0.00 for i in range(samples)]
for i in range(samples):
q_gained[i] = (((ctrl_m.loc[i, "Average DALYs"] - trt_m.loc[i, "Average DALYs"])* m_prop) + ((ctrl_f.loc[i, "Average DALYs"] - trt_f.loc[i, "Average DALYs"])* f_prop))
q_inc_percent[i] = q_gained[i] * 100/((ctrl_m.loc[i, "Average DALYs"] * m_prop) + (ctrl_f.loc[i, "Average DALYs"] *f_prop))
htn_cost[i] = int_cost + ((trt_m.loc[i, "Average HTN Cost"] - ctrl_m.loc[i, "Average HTN Cost"]) * m_prop) + ((trt_f.loc[i, "Average HTN Cost"] - ctrl_f.loc[i, "Average HTN Cost"]) * f_prop)
cvd_cost[i] = ((trt_m.loc[i, "Average CVD Cost"] - ctrl_m.loc[i, "Average CVD Cost"] + trt_m.loc[i, "Average Chronic Cost"] - ctrl_m.loc[i, "Average Chronic Cost"]) * m_prop) + ((trt_f.loc[i, "Average CVD Cost"] - ctrl_f.loc[i, "Average CVD Cost"] + trt_f.loc[i, "Average Chronic Cost"] - ctrl_f.loc[i, "Average Chronic Cost"]) * f_prop)
exp_inc_per[i] = (((trt_m.loc[i, "Average Cost"] - ctrl_m.loc[i, "Average Cost"] + int_cost) * m_prop) + ((trt_f.loc[i, "Average Cost"] - ctrl_f.loc[i, "Average Cost"] + int_cost) * f_prop)) * 100 / ((ctrl_m.loc[i, "Average Cost"] * m_prop ) + (ctrl_f.loc[i, "Average Cost"] * f_prop))
net_cost[i] = htn_cost[i] + cvd_cost[i]
ic[i] = net_cost[i] / q_gained[i]
if net_cost[i] < 0:
cs = cs + 1
if q_gained[i] < 0:
nq = nq + 1
budget_impact = numpy.mean(net_cost) * pop / time_horizon
htn_percap = numpy.mean(htn_cost) / time_horizon
cvd_percap = numpy.mean(cvd_cost) / time_horizon
htn_annual = numpy.mean(htn_cost) * pop / time_horizon
cvd_annual = numpy.mean(cvd_cost) * pop / time_horizon
cost_inc = numpy.mean(exp_inc_per)
ICER = numpy.mean(ic)
QALY = numpy.mean(q_inc_percent)
HTN = numpy.mean(htn_cost)
CVD = numpy.mean(cvd_cost)
icer_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(ic), scale=st.sem(ic))
qaly_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(q_inc_percent), scale=st.sem(q_inc_percent))
htn = st.t.interval(0.95, samples - 1, loc=numpy.mean(htn_cost), scale=st.sem(htn_cost))
cvd = st.t.interval(0.95, samples - 1, loc=numpy.mean(cvd_cost), scale=st.sem(cvd_cost))
cost_inc_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(exp_inc_per), scale=st.sem(exp_inc_per))
if budget_impact < 0:
m_icer = 'Cost Saving'
s_icer = 'CS'
else:
m_icer = numpy.mean(net_cost) / numpy.mean(q_gained)
s_icer = str(numpy.round(m_icer,1))
m_daly = str(numpy.round(QALY,3)) + "\n(" + str(numpy.round(qaly_95[0],3)) + " to " + str(numpy.round(qaly_95[1],3)) + ")"
m_htn = str(numpy.round(HTN,2)) + "\n(" + str(numpy.round(htn[0],2)) + " to " + str(numpy.round(htn[1],2)) + ")"
m_cvd = str(numpy.round(CVD,2)) + "\n(" + str(numpy.round(cvd[0],2)) + " to " + str(numpy.round(cvd[1],2)) + ")"
m_costinc = str(numpy.round(cost_inc, 2)) + "\n(" + str(numpy.round(cost_inc_95[0], 2)) + " to " + str(numpy.round(cost_inc_95[1], 2)) + ")"
m_budget = str(numpy.round(budget_impact,0)/1000)
err_cost = 1.96 * st.sem(exp_inc_per)
err_daly = 1.96 * st.sem(q_inc_percent)
str_icer = text + " (" + s_icer + ")"
detailed = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], ICER, icer_95[0],icer_95[1], QALY, qaly_95[0], qaly_95[1], htn[0], htn[1], cvd[0], cvd[1], budget_impact, htn_annual, cvd_annual, htn_percap, cvd_percap, cs, nq]
manuscript = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], m_icer, m_daly, m_costinc, m_htn, m_cvd, m_budget, cs]
plot = [text, str_icer, cost_inc, QALY, err_cost, err_daly]
return detailed, manuscript, plot
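# Illustrative sketch only (not called by the analyses below): the discounting applied to
# the programme cost at the top of summary_cost(). Year t is weighted by 1/(1+rate)**t,
# so with the base-case 3% rate the year-19 weight is about 0.57. Inputs are hypothetical.
def _example_discounted_programme_cost(annual_cost=0.13, rate=0.03, horizon=20):
    weights = 1 / (1 + rate) ** numpy.array(range(horizon))
    return float(numpy.sum(annual_cost * weights))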
summary_output = []
appendix_output = []
plot_output = []
'''Analysis 0: Baseline'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Base Case')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 1: Doubled Medication Cost'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 2, 0, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 2, 0, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'2X Medication Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 2: Increased Programmatic Cost'''
time_horizon = 20
prog_cost = 0.13*4
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'4X Programmatic Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 3: 20% reduction in baseline CVD risk'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0.2, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0.2, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Reduced Baseline Risk')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 4: NPCDCS Medication Protocol'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 0, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = pd.read_csv(file_name_f)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'NPCDCS Treatment Guideline')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 5: Private Sector Cost'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PvtFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = pd.read_csv(file_name_f)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Private Sector')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 6: PubPvt Mix Cost'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PubPvtFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f =
|
pd.read_csv(file_name_f)
|
pandas.read_csv
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Preprocess ieee-fraud-detection dataset.
(https://www.kaggle.com/c/ieee-fraud-detection).
Train shape:(590540,394),identity(144233,41)--isFraud 3.5%
Test shape:(506691,393),identity(141907,41)
############### TF Version: 1.13.1/Python Version: 3.7 ###############
"""
import os
import gc
import random
import warnings
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn import metrics
from sklearn.model_selection import KFold, GroupKFold
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
# Make all processes deterministic by fixing the random number generator seeds.
# os.environ maps environment-variable names to strings; PYTHONHASHSEED is one of them.
# Python normally salts the hash of str/bytes/datetime objects with a random seed;
# setting this variable to a number makes it use that fixed seed instead.
def set_seed(seed=0):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def make_predictions(tr_df, tt_df, features_columns, target, params, nfold=2):
    # K-fold cross-validation
folds = KFold(n_splits=nfold, shuffle=True, random_state=SEED)
# folds = GroupKFold(n_splits=nfold)
    # Split features/target for the train and inference sets
train_x, train_y = tr_df[features_columns], tr_df[target]
infer_x, infer_y = tt_df[features_columns], tt_df[target]
split_groups = tr_df['DT_M']
tt_df = tt_df[["TransactionID", target]]
predictions = np.zeros(len(tt_df))
oof = np.zeros(len(tr_df))
    # Train on each fold and accumulate predictions
for fold_, (tra_idx, val_idx) in enumerate(folds.split(train_x, train_y)):
print("-----Fold:", fold_)
tr_x, tr_y = train_x.iloc[tra_idx, :], train_y[tra_idx]
vl_x, vl_y = train_x.iloc[val_idx, :], train_y[val_idx]
print("-----Train num:", len(tr_x), "Valid num:", len(vl_x))
tr_data = lgb.Dataset(tr_x, label=tr_y)
if LOCAL_TEST:
vl_data = lgb.Dataset(infer_x, label=infer_y)
else:
vl_data = lgb.Dataset(vl_x, label=vl_y)
estimator = lgb.train(params, tr_data, valid_sets=[tr_data, vl_data], verbose_eval=100)
infer_p = estimator.predict(infer_x)
predictions += infer_p / nfold
oof_preds = estimator.predict(vl_x)
oof[val_idx] = (oof_preds - oof_preds.min()) / (oof_preds.max() - oof_preds.min())
if LOCAL_TEST:
feature_imp = pd.DataFrame(sorted(zip(estimator.feature_importance(), train_x.columns)),
columns=['Value', 'Feature'])
print(feature_imp)
del tr_x, tr_y, vl_x, vl_y, tr_data, vl_data
gc.collect()
tt_df["prediction"] = predictions
print("OOF AUC:", metrics.roc_auc_score(train_y, oof))
return tt_df
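# Illustrative sketch only (not executed): the shape of the `params` dict that
# make_predictions() passes to lgb.train(). The actual values used for the competition
# run are not defined in this file, so these are placeholders.
def _example_lgb_params(seed):
    return {
        "objective": "binary",   # isFraud is a 0/1 target
        "metric": "auc",         # matches the OOF AUC reported above
        "learning_rate": 0.01,
        "seed": seed,
        "verbosity": -1,
    }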
if __name__ == "__main__":
print("========== 1.Set random seed ...")
SEED = 42
set_seed(SEED)
print("========== 2.Load pkl data ...")
LOCAL_TEST = False
TARGET = "isFraud"
START_DATE = datetime.datetime.strptime("2017-11-30", "%Y-%m-%d")
dir_data_pkl = os.getcwd() + "\\ieee-fraud-pkl-no-fe\\"
train_df = pd.read_pickle(dir_data_pkl + "\\train_tran_no_fe.pkl")
if LOCAL_TEST:
# Convert TransactionDT to "Month" time-period.
# We will also drop penultimate block to "simulate" test set values difference
        # Bucket TransactionDT by month; for local testing the last month of the training data becomes the test set
train_df["DT_M"] = train_df["TransactionDT"].apply(lambda x: (START_DATE + datetime.timedelta(seconds=x)))
train_df["DT_M"] = (train_df["DT_M"].dt.year - 2017) * 12 + train_df["DT_M"].dt.month
infer_df = train_df[train_df["DT_M"] == train_df["DT_M"].max()].reset_index(drop=True)
train_df = train_df[train_df["DT_M"] < (train_df["DT_M"].max() - 1)].reset_index(drop=True)
train_id_df = pd.read_pickle(dir_data_pkl + "\\train_iden_no_fe.pkl")
infer_id_df = train_id_df[train_id_df["TransactionID"].isin(infer_df["TransactionID"])].reset_index(drop=True)
train_id_df = train_id_df[train_id_df["TransactionID"].isin(train_df["TransactionID"])].reset_index(drop=True)
del train_df["DT_M"], infer_df["DT_M"]
else:
infer_df = pd.read_pickle(dir_data_pkl + "\\infer_tran_no_fe.pkl")
train_id_df = pd.read_pickle(dir_data_pkl + "\\train_iden_no_fe.pkl")
infer_id_df = pd.read_pickle(dir_data_pkl + "\\infer_iden_no_fe.pkl")
print("-----Shape control:", train_df.shape, infer_df.shape)
rm_cols = ["TransactionID", "TransactionDT", TARGET]
base_columns = [col for col in list(train_df) if col not in rm_cols]
print("========== 3.Feature Engineering ...")
###############################################################################
    # ================================ Add new features ==================================
# TransactionDT[86400,15811131],START_DATE=2017-11-30
for df in [train_df, infer_df]:
df["DT"] = df["TransactionDT"].apply(lambda x: (START_DATE + datetime.timedelta(seconds=x)))
df["DT_M"] = ((df["DT"].dt.year - 2017) * 12 + df["DT"].dt.month).astype(np.int8)
df["DT_W"] = ((df["DT"].dt.year - 2017) * 52 + df["DT"].dt.weekofyear).astype(np.int8)
df["DT_D"] = ((df["DT"].dt.year - 2017) * 365 + df["DT"].dt.dayofyear).astype(np.int16)
df["DT_day_month"] = df["DT"].dt.day.astype(np.int8)
df["DT_day_week"] = df["DT"].dt.dayofweek.astype(np.int8)
df["DT_day_hour"] = df["DT"].dt.hour.astype(np.int8)
df["Is_december"] = df["DT"].dt.month
df["Is_december"] = (df["Is_december"] == 12).astype(np.int8)
df["weekday"] = df["TransactionDT"].map(lambda x: (x // (3600 * 24)) % 7)
# Total transactions per timeblock
for col in ["DT_M", "DT_W", "DT_D"]:
temp_df =
|
pd.concat([train_df[[col]], infer_df[[col]]])
|
pandas.concat
|
import numpy as np
import pandas as pd
import sys, os
import math
import collections
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
import tensorflow as tf
import argparse
import timeit
from datetime import datetime
run_time = timeit.default_timer()
from sklearn import metrics
from tensorflow.python.framework import ops
np.random.seed(0)
parser = argparse.ArgumentParser(description='RUN ACTINN')
parser.add_argument('--train_path', default="datasets/human_blood_integrated_01/train.pkl", type=str,
help='path to train data frame with labels')
parser.add_argument('--test_path', default="datasets/human_blood_integrated_01/test.pkl", type=str,
help='path to test data frame with labels')
parser.add_argument('--column', type=str, default='labels',
help='column name for cell types')
parser.add_argument("-lr", "--learning_rate", type=float,
help="Learning rate (default: 0.0001)", default=0.0001)
parser.add_argument("-ne", "--num_epochs", type=int,
help="Number of epochs (default: 50)", default=50)
parser.add_argument("-ms", "--minibatch_size", type=int,
help="Minibatch size (default: 128)", default=128)
parser.add_argument("-pc", "--print_cost", type=bool,
help="Print cost when training (default: True)", default=True)
parser.add_argument("-op", "--output_probability", type=bool,
help="Output the probabilities for each cell being the cell types in the training data (default: False)", default=False)
# Get common genes, normalize and scale the sets
def scale_sets(sets, isint=False):
# input -- a list of all the sets to be scaled
# output -- scaled sets
common_genes = set(sets[0].index)
for i in range(1, len(sets)):
common_genes = set.intersection(set(sets[i].index),common_genes)
common_genes = sorted(list(common_genes))
sep_point = [0]
for i in range(len(sets)):
sets[i] = sets[i].loc[common_genes,]
sep_point.append(sets[i].shape[1])
total_set = np.array(pd.concat(sets, axis=1, sort=False), dtype=np.float32)
if isint:
total_set = np.divide(total_set, np.sum(total_set, axis=0, keepdims=True)) * 10000
total_set = np.log2(total_set+1)
# expr = np.sum(total_set, axis=1)
# total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]
# cv = np.std(total_set, axis=1) / np.mean(total_set, axis=1)
# total_set = total_set[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),]
for i in range(len(sets)):
sets[i] = total_set[:, sum(sep_point[:(i+1)]):sum(sep_point[:(i+2)])]
return sets
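# Illustrative sketch only (never called): scale_sets() keeps the genes shared by all
# sets, optionally normalizes each cell to 10,000 total counts, and log2-transforms.
# The toy gene/cell names below are made up.
def _example_scale_sets():
    train = pd.DataFrame([[5, 0], [3, 2]], index=["geneA", "geneB"], columns=["cell1", "cell2"])
    test = pd.DataFrame([[1], [4]], index=["geneB", "geneA"], columns=["cell3"])
    scaled_train, scaled_test = scale_sets([train, test], isint=True)
    return scaled_train.shape, scaled_test.shape   # (2, 2) and (2, 1)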
# Turn labels into matrix
def one_hot_matrix(labels, C):
# input -- labels (true labels of the sets), C (# types)
# output -- one hot matrix with shape (# types, # samples)
C = tf.constant(C, name = "C")
one_hot_matrix = tf.one_hot(labels, C, axis = 0)
sess = tf.Session()
one_hot = sess.run(one_hot_matrix)
sess.close()
return one_hot
# Make types to labels dictionary
def type_to_label_dict(types):
# input -- types
# output -- type_to_label dictionary
type_to_label_dict = {}
all_type = list(set(types))
for i in range(len(all_type)):
type_to_label_dict[all_type[i]] = i
return type_to_label_dict
# Convert types to labels
def convert_type_to_label(types, type_to_label_dict):
# input -- list of types, and type_to_label dictionary
# output -- list of labels
types = list(types)
labels = list()
for type in types:
labels.append(type_to_label_dict[type])
return labels
# Function to create placeholders
def create_placeholders(n_x, n_y):
X = tf.placeholder(tf.float32, shape = (n_x, None))
Y = tf.placeholder(tf.float32, shape = (n_y, None))
return X, Y
# Initialize parameters
def initialize_parameters(nf, ln1, ln2, ln3, nt):
    # input -- nf (# of features), ln1/ln2/ln3 (# nodes in hidden layers 1-3), nt (# types)
# output -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    tf.set_random_seed(3) # set seed to make the results consistent
W1 = tf.get_variable("W1", [ln1, nf], initializer = tf.contrib.layers.xavier_initializer(seed = 3))
b1 = tf.get_variable("b1", [ln1, 1], initializer = tf.zeros_initializer())
W2 = tf.get_variable("W2", [ln2, ln1], initializer = tf.contrib.layers.xavier_initializer(seed = 3))
b2 = tf.get_variable("b2", [ln2, 1], initializer = tf.zeros_initializer())
W3 = tf.get_variable("W3", [ln3, ln2], initializer = tf.contrib.layers.xavier_initializer(seed = 3))
b3 = tf.get_variable("b3", [ln3, 1], initializer = tf.zeros_initializer())
W4 = tf.get_variable("W4", [nt, ln3], initializer = tf.contrib.layers.xavier_initializer(seed = 3))
b4 = tf.get_variable("b4", [nt, 1], initializer = tf.zeros_initializer())
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3, "W4": W4, "b4": b4}
return parameters
# Forward propagation function
def forward_propagation(X, parameters):
    # function model: [LINEAR -> RELU] x 3 -> LINEAR (softmax is applied in the loss/prediction step)
# input -- dataset with shape (# features, # sample), parameters "W1", "b1", "W2", "b2", "W3", "b3"
# output -- the output of the last linear unit
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
W4 = parameters['W4']
b4 = parameters['b4']
# forward calculations
Z1 = tf.add(tf.matmul(W1, X), b1)
A1 = tf.nn.relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2)
A2 = tf.nn.relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3)
A3 = tf.nn.relu(Z3)
Z4 = tf.add(tf.matmul(W4, A3), b4)
return Z4
# Compute cost
def compute_cost(Z4, Y, parameters, lambd=0.01):
    # input -- Z4 (output of forward propagation with shape (# types, # samples)), Y (true labels, same shape as Z4)
    # output -- tensor of the cost function
logits = tf.transpose(Z4)
labels = tf.transpose(Y)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = logits, labels = labels)) + \
(tf.nn.l2_loss(parameters["W1"]) + tf.nn.l2_loss(parameters["W2"]) + tf.nn.l2_loss(parameters["W3"]) + tf.nn.l2_loss(parameters["W4"])) * lambd
return cost
# Get the mini batches
def random_mini_batches(X, Y, mini_batch_size=32, seed=1):
# input -- X (training set), Y (true labels)
# output -- mini batches
ns = X.shape[1]
mini_batches = []
np.random.seed(seed)
# shuffle (X, Y)
permutation = list(np.random.permutation(ns))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
# partition (shuffled_X, shuffled_Y), minus the end case.
    num_complete_minibatches = int(math.floor(ns/mini_batch_size)) # number of mini batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# handling the end case (last mini-batch < mini_batch_size)
if ns % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : ns]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : ns]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
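# Illustrative sketch only (not called during training): with 10 samples and a mini-batch
# size of 4, random_mini_batches() returns two full batches of 4 plus one remainder batch
# of 2. Arrays are laid out (features, samples) as in the rest of this script.
def _example_minibatch_split():
    X = np.zeros((3, 10))   # 3 features, 10 samples
    Y = np.zeros((2, 10))   # 2 one-hot classes
    batches = random_mini_batches(X, Y, mini_batch_size=4, seed=1)
    return [mb[0].shape[1] for mb in batches]   # -> [4, 4, 2]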
# Forward propagation for prediction
def forward_propagation_for_predict(X, parameters):
    # input -- X (dataset used to make prediction), parameters after training
# output -- the output of the last linear unit
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
W4 = parameters['W4']
b4 = parameters['b4']
Z1 = tf.add(tf.matmul(W1, X), b1)
A1 = tf.nn.relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2)
A2 = tf.nn.relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3)
A3 = tf.nn.relu(Z3)
Z4 = tf.add(tf.matmul(W4, A3), b4)
return Z4
# Predict function
def predict(X, parameters):
    # input -- X (dataset used to make prediction), parameters after training
# output -- prediction
W1 = tf.convert_to_tensor(parameters["W1"])
b1 = tf.convert_to_tensor(parameters["b1"])
W2 = tf.convert_to_tensor(parameters["W2"])
b2 = tf.convert_to_tensor(parameters["b2"])
W3 = tf.convert_to_tensor(parameters["W3"])
b3 = tf.convert_to_tensor(parameters["b3"])
W4 = tf.convert_to_tensor(parameters["W4"])
b4 = tf.convert_to_tensor(parameters["b4"])
params = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3, "W4": W4, "b4": b4}
x = tf.placeholder("float")
z4 = forward_propagation_for_predict(x, params)
p = tf.argmax(z4)
sess = tf.Session()
prediction = sess.run(p, feed_dict = {x: X})
return prediction
def predict_probability(X, parameters):
    # input -- X (dataset used to make prediction), parameters after training
# output -- prediction
W1 = tf.convert_to_tensor(parameters["W1"])
b1 = tf.convert_to_tensor(parameters["b1"])
W2 = tf.convert_to_tensor(parameters["W2"])
b2 = tf.convert_to_tensor(parameters["b2"])
W3 = tf.convert_to_tensor(parameters["W3"])
b3 = tf.convert_to_tensor(parameters["b3"])
W4 = tf.convert_to_tensor(parameters["W4"])
b4 = tf.convert_to_tensor(parameters["b4"])
params = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3, "W4": W4, "b4": b4}
x = tf.placeholder("float")
z4 = forward_propagation_for_predict(x, params)
p = tf.nn.softmax(z4, axis=0)
sess = tf.Session()
prediction = sess.run(p, feed_dict = {x: X})
return prediction
# Build the model
def model(X_train, Y_train, X_test, starting_learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 128, print_cost = True):
    # input -- X_train (training set), Y_train (training labels), X_test (test set)
# output -- trained parameters
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(3)
seed = 3
(nf, ns) = X_train.shape
nt = Y_train.shape[0]
costs = []
# create placeholders of shape (nf, nt)
X, Y = create_placeholders(nf, nt)
# initialize parameters
parameters = initialize_parameters(nf=nf, ln1=100, ln2=50, ln3=25, nt=nt)
# forward propagation: build the forward propagation in the tensorflow graph
Z4 = forward_propagation(X, parameters)
# cost function: add cost function to tensorflow graph
cost = compute_cost(Z4, Y, parameters, 0.005)
# Use learning rate decay
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(starting_learning_rate, global_step, 1000, 0.95, staircase=True)
# backpropagation: define the tensorflow optimizer, AdamOptimizer is used.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
trainer = optimizer.minimize(cost, global_step=global_step)
# initialize all the variables
init = tf.global_variables_initializer()
# start the session to compute the tensorflow graph
with tf.Session() as sess:
# run the initialization
sess.run(init)
# do the training loop
for epoch in range(num_epochs):
epoch_cost = 0.
num_minibatches = int(ns / minibatch_size)
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# run the session to execute the "optimizer" and the "cost", the feedict contains a minibatch for (X,Y).
_ , minibatch_cost = sess.run([trainer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
epoch_cost += minibatch_cost / num_minibatches
# print the cost every epoch
if print_cost == True and (epoch+1) % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch+1, epoch_cost))
costs.append(epoch_cost)
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z4), tf.argmax(Y))
# calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
return parameters
if __name__ == '__main__':
startTime = datetime.now()
args = parser.parse_args()
train_batch = pd.read_pickle(args.train_path)
test_batch = pd.read_pickle(args.test_path)
lname = args.column
path = os.path.dirname(args.train_path) + "/ACTINN"
os.system("mkdir -p {}".format(path))
train_mat = train_batch.drop(lname, axis=1).transpose()
train_labels = train_batch[lname]
test_mat = test_batch.drop(lname, axis=1).transpose()
test_labels = test_batch[lname]
mat = train_mat.values
mat_round = np.rint(mat)
error = np.mean(np.abs(mat - mat_round))
isint = error == 0
common_labels = list(set(train_labels) & set(test_labels))
# print(train_mat, train_label)
# sys.exit()
barcode = list(test_mat.columns)
nt = len(set(train_labels))
train_mat, test_mat = scale_sets([train_mat, test_mat], isint=False)
# train_mat = train_mat.values
# test_mat = test_mat.values
type_to_label_dict = type_to_label_dict(train_labels)
label_to_type_dict = {v: k for k, v in type_to_label_dict.items()}
print("Cell Types in training set:", type_to_label_dict)
print("# Training cells:", train_labels.shape[0])
train_labels = convert_type_to_label(train_labels, type_to_label_dict)
train_labels = one_hot_matrix(train_labels, nt)
parameters = model(train_mat, train_labels, test_mat, \
args.learning_rate, args.num_epochs, args.minibatch_size, args.print_cost)
probs = predict_probability(test_mat, parameters)
probs = probs.T
max_prob = np.max(probs, axis=1)
dic2 = {i: probs[:, j] for i, j in type_to_label_dict.items()}
# Print the probabilities
if args.output_probability:
test_predict = pd.DataFrame(predict_probability(test_mat, parameters))
test_predict.index = [label_to_type_dict[x] for x in range(test_predict.shape[0])]
test_predict.columns = barcode
test_predict.to_csv("predicted_probabilities.txt", sep="\t")
test_predict = predict(test_mat, parameters)
predicted_label = []
for i in range(len(test_predict)):
predicted_label.append(label_to_type_dict[test_predict[i]])
f1_scores = metrics.f1_score(list(test_labels), predicted_label, average=None)
median_f1_score = np.median(f1_scores)
mean_f1_score = np.mean(f1_scores)
weighted_f1_score = metrics.f1_score(list(test_labels), predicted_label, average="weighted")
filt_predictions = [predicted_label[i] if max_prob[i] > 0.9 else "Unassigned" for i in range(len(predicted_label))]
dic1 = {"cellname": barcode,
"raw_predictions": predicted_label,
"predictions": filt_predictions,
"labels": list(test_labels)}
dic = {**dic1, **dic2}
predicted_label =
|
pd.DataFrame(dic)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 09:55:29 2020
@author: Gary
"""
import pandas as pd
import core.Find_silent_change as fsc
import core.Read_FF as rff
#import difflib
#import core.Construct_set as const_set
output = './out/'
tempdir = './tmp/'
arcdir = './archive/'
upload_diff_ref = output+'upload_diff_ref.csv'
change_log = output+'silent_change_log.csv'
silent_detail = output+'silent_detail.txt'
exclude_files = ['archive_2018_08_28.zip','sky_truth_final.zip']
skyfn = 'sky_truth_final'
def getDfForCompare(fn,sources='./sources/'):
fn = sources+fn
raw_df = rff.Read_FF(zname=fn).import_raw()
raw_df = raw_df[~(raw_df.IngredientKey.isna())]
raw_df = raw_df.drop(['raw_filename','data_source','record_flags'],
axis=1)
return raw_df
def showDifference(uploadlst,olddf, df):
outstr = ''
for uk in uploadlst:
outstr += f' Differences in {uk}\n'
if fsc.compareFrameAsStrings(olddf[olddf.UploadKey==uk],
df[df.UploadKey==uk]):
conc = pd.merge(olddf[olddf.UploadKey==uk],df[df.UploadKey==uk],on='IngredientKey',how='outer',
indicator=True)
cols = df.columns.tolist()
cols.remove('IngredientKey')
for col in cols:
x = col+'_x'
y = col+'_y'
conc['comp'] = conc[x]==conc[y]
if conc.comp.sum()<len(conc):
outstr += f'{conc[~conc.comp][[x,y]]}\n'
outstr += f'{col}, sum = {conc.comp.sum()}\n'
if len(outstr)>0:
print(f' Details available at {silent_detail}')
with open(silent_detail,'w') as f:
f.write(outstr)
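# Illustrative sketch only (not called by the workflow): the outer merge with
# indicator=True used in showDifference() and startFromScratch(). The '_merge' column
# labels each row 'left_only', 'right_only' or 'both'. The keys below are toy values.
def _example_merge_indicator():
    left = pd.DataFrame({'IngredientKey': [1, 2], 'val': ['x', 'y']})
    right = pd.DataFrame({'IngredientKey': [2, 3], 'val': ['y', 'z']})
    merged = pd.merge(left, right, on='IngredientKey', how='outer', indicator=True)
    return merged[['IngredientKey', '_merge']]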
def add_to_uploadRef(rec_df):
try:
diff_ref = pd.read_csv(upload_diff_ref)
except:
diff_ref = pd.DataFrame()
diff_ref = pd.concat([rec_df,diff_ref],sort=True)
diff_ref.to_csv(upload_diff_ref,index=False)
def add_to_change_log(clog):
try:
logdf = pd.read_csv(change_log)
except:
logdf = pd.DataFrame()
logdf = pd.concat([clog,logdf])
logdf.to_csv(change_log,index=False)
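# Illustrative sketch only (not part of the pipeline): the row-hash comparison that
# startFromScratch() uses to flag silent edits -- identical rows hash identically, so a
# differing hash marks a changed row. Toy columns below; the default hash key is used here.
def _example_row_hash_compare():
    old = pd.DataFrame({'IngredientKey': ['a', 'b'], 'PercentHFJob': [1.0, 2.0]})
    new = pd.DataFrame({'IngredientKey': ['a', 'b'], 'PercentHFJob': [1.0, 2.5]})
    old_hash = pd.util.hash_pandas_object(old, index=False)
    new_hash = pd.util.hash_pandas_object(new, index=False)
    return (old_hash != new_hash).tolist()   # -> [False, True]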
def startFromScratch():
"""Be aware - this initializes everything before running a LONG process on
all archived files!"""
archives = fsc.createInitialCompareList()
#new = pd.DataFrame({'UploadKey':None,'rhash':None},index=[])
df = pd.DataFrame()
for i,arc in enumerate(archives[:]):
print(f'\nProcessing archive for silent changes:\n {arc}\n')
olddf = df.copy()
df = getDfForCompare(arc[1],sources=arcdir)
if len(olddf)==0: # first run, nothing left to do
continue
oldulk = olddf.UploadKey.unique().tolist()
df = fsc.getNormalizedDF(df)
olddf = fsc.getNormalizedDF(olddf)
ulk = df.UploadKey.unique().tolist()
ukMissingFromNew = []
for uk in oldulk:
if uk not in ulk:
ukMissingFromNew.append(uk)
#print(olddf.columns)
print(f' Number of UploadKeys gone missing in new set: {len(ukMissingFromNew)}')
if len(ukMissingFromNew)>0:
tmp = olddf[olddf.UploadKey.isin(ukMissingFromNew)][['UploadKey','IngredientKey']]
#print(tmp.UploadKey.head())
tmp['ref_fn'] = archives[i-1][1]
tmp['new_fn'] = archives[i][1]
tmp['reason'] = 'UploadKey missing from newer archive'
add_to_uploadRef(tmp)
tmp = tmp.groupby('UploadKey',as_index=False).first()
tmp = tmp.drop('IngredientKey',axis=1)
tmp['ref_date'] = archives[i-1][0]
tmp['new_date'] = archives[i][0]
gb = olddf[olddf.UploadKey.isin(ukMissingFromNew)].groupby('UploadKey',as_index=True)\
[['APINumber','OperatorName','JobEndDate']].first()
tmp = pd.merge(tmp,gb,on='UploadKey')
add_to_change_log(tmp)
# find matching records
mg = pd.merge(olddf,df,on=['UploadKey','IngredientKey'],how='outer',
indicator=True,validate='1:1')
common = mg[mg['_merge']=='both'][['UploadKey','IngredientKey']].copy()
newmg = pd.merge(common,df,on=['UploadKey','IngredientKey'],how='inner')
#print(newmg.columns)
newmg['rhash'] = pd.util.hash_pandas_object(newmg,hash_key='1234').astype('int64')
oldmg = pd.merge(common,olddf,on=['UploadKey','IngredientKey'],how='inner')
#print(oldmg.columns)
oldmg['rhash'] = pd.util.hash_pandas_object(oldmg,hash_key='1234').astype('int64')
pd.concat([newmg.head(),oldmg.head()]).to_csv('./tmp/temp.csv')
print(' Merging old/new with hash values for each row')
hashmg = pd.merge(oldmg[['UploadKey','IngredientKey','rhash']],
newmg[['UploadKey','IngredientKey','rhash']],
on=['UploadKey','IngredientKey'],validate='1:1')
hashmg['rdiff'] = hashmg.rhash_x != hashmg.rhash_y
ulk_diff_list = hashmg[hashmg.rdiff].UploadKey.unique().tolist()
print(f' Number of rows with differing hash values: {hashmg.rdiff.sum()} out of {len(hashmg)}')
print(f' in {len(ulk_diff_list)} disclosure(s)\n')
showDifference(ulk_diff_list,olddf,df)
tmp = hashmg[hashmg.rdiff][['UploadKey','IngredientKey']]
mgulk = hashmg.UploadKey.unique().tolist()
tmp['ref_fn'] = archives[i-1][1]
tmp['new_fn'] = archives[i][1]
tmp['reason'] = 'differing row hash'
add_to_uploadRef(tmp)
tmp = tmp.groupby('UploadKey',as_index=False).first()
tmp = tmp.drop(['IngredientKey'],axis=1)
tmp['ref_date'] = archives[i-1][0]
tmp['new_date'] = archives[i][0]
gb = olddf[olddf.UploadKey.isin(mgulk)].groupby('UploadKey',as_index=True)\
[['APINumber','OperatorName','JobEndDate']].first()
tmp =
|
pd.merge(tmp,gb,on='UploadKey')
|
pandas.merge
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import quantipy as qp
# from matplotlib import pyplot as plt
# import matplotlib.image as mpimg
import string
import pickle
import warnings
try:
import seaborn as sns
from PIL import Image
except:
pass
from quantipy.core.cache import Cache
from quantipy.core.view import View
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.helpers.functions import emulate_meta
from quantipy.core.tools.view.logic import (has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection, get_logic_index)
from quantipy.core.helpers.functions import (paint_dataframe,
emulate_meta,
get_text,
finish_text_key)
from quantipy.core.tools.dp.prep import recode
from quantipy.core.tools.qp_decorators import lazy_property
from operator import add, sub, mul
from operator import truediv as div
#from scipy.stats.stats import _ttest_finish as get_pval
from scipy.stats._stats_py import _ttest_finish as get_pval
from scipy.stats import chi2 as chi2dist
from scipy.stats import f as fdist
from itertools import combinations, chain, product
from collections import defaultdict, OrderedDict, Counter
import gzip
try:
import dill
except:
pass
import json
import copy
import time
import sys
import re
from quantipy.core.rules import Rules
_TOTAL = '@'
_AXES = ['x', 'y']
class ChainManager(object):
def __init__(self, stack):
self.stack = stack
self.__chains = []
self.source = 'native'
self.build_info = {}
self._hidden = []
def __str__(self):
return '\n'.join([chain.__str__() for chain in self])
def __repr__(self):
return self.__str__()
def __getitem__(self, value):
if isinstance(value, str):
element = self.__chains[self._idx_from_name(value)]
is_folder = isinstance(element, dict)
if is_folder:
return list(element.values())[0]
else:
return element
else:
return self.__chains[value]
def __len__(self):
"""returns the number of cached Chains"""
return len(self.__chains)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < self.__len__():
obj = self[self.n]
self.n += 1
return obj
else:
raise StopIteration
next = __next__
def add_chain(self, chain):
self.__chains.append(chain)
@property
def folders(self):
"""
Folder indices, names and number of stored ``qp.Chain`` items (as tuples).
"""
return [(self.__chains.index(f), list(f.keys())[0], len(list(f.values())[0]))
for f in self if isinstance(f, dict)]
@property
def singles(self):
"""
The list of all non-folder ``qp.Chain`` indices and names (as tuples).
"""
return list(zip(self.single_idxs, self.single_names))
@property
def chains(self):
"""
The flattened list of all ``qp.Chain`` items of self.
"""
all_chains = []
for c in self:
if isinstance(c, dict):
all_chains.extend(list(c.values())[0])
else:
all_chains.append(c)
return all_chains
@property
def folder_idxs(self):
"""
The folders' index positions in self.
"""
return [f[0] for f in self.folders]
@property
def folder_names(self):
"""
The folders' names from self.
"""
return [f[1] for f in self.folders]
@property
def single_idxs(self):
"""
The ``qp.Chain`` instances' index positions in self.
"""
return [self.__chains.index(c) for c in self if isinstance(c, Chain)]
@property
def single_names(self):
"""
The ``qp.Chain`` instances' names.
"""
return [s.name for s in self if isinstance(s, Chain)]
@property
def hidden(self):
"""
All ``qp.Chain`` elements that are hidden.
"""
return [c.name for c in self.chains if c.hidden]
@property
def hidden_folders(self):
"""
All hidden folders.
"""
return [n for n in self._hidden if n in self.folder_names]
def _content_structure(self):
return ['folder' if isinstance(k, dict) else 'single' for k in self]
def _singles_to_idx(self):
return {name: i for i, name in list(self._idx_to_singles().items())}
def _idx_to_singles(self):
return dict(self.singles)
def _idx_fold(self):
return dict([(f[0], f[1]) for f in self.folders])
def _folders_to_idx(self):
return {name: i for i, name in list(self._idx_fold().items())}
def _names(self, unroll=False):
if not unroll:
return self.folder_names + self.single_names
else:
return [c.name for c in self.chains]
def _idxs_to_names(self):
singles = self.singles
folders = [(f[0], f[1]) for f in self.folders]
return dict(singles + folders)
def _names_to_idxs(self):
return {n: i for i, n in list(self._idxs_to_names().items())}
def _name_from_idx(self, name):
return self._idxs_to_names()[name]
def _idx_from_name(self, idx):
return self._names_to_idxs()[idx]
def _is_folder_ref(self, ref):
return ref in self._folders_to_idx() or ref in self._idx_fold()
def _is_single_ref(self, ref):
        return ref in self._singles_to_idx() or ref in self._idx_to_singles()
def _uniquify_names(self):
all_names = Counter(self.single_names + self.folder_names)
single_name_occ = Counter(self.single_names)
folder_name_occ = {folder: Counter([c.name for c in self[folder]])
for folder in self.folder_names}
for struct_name in all_names:
if struct_name in folder_name_occ:
iter_over = folder_name_occ[struct_name]
is_folder = struct_name
else:
iter_over = single_name_occ
is_folder = False
for name, occ in list(iter_over.items()):
if occ > 1:
new_names = ['{}_{}'.format(name, i) for i in range(1, occ + 1)]
idx = [s[0] for s in self.singles if s[1] == name]
pairs = list(zip(idx, new_names))
if is_folder:
for idx, c in enumerate(self[is_folder]):
c.name = pairs[idx][1]
else:
for p in pairs:
self.__chains[p[0]].name = p[1]
return None
def _set_to_folderitems(self, folder):
"""
Will keep only the ``values()`` ``qp.Chain`` item list from the named
folder. Use this for within-folder-operations...
"""
if not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
else:
org_chains = self.__chains[:]
org_index = self._idx_from_name(folder)
self.__chains = self[folder]
return org_chains, org_index
def _rebuild_org_folder(self, folder, items, index):
"""
After a within-folder-operation this method is using the returns
of ``_set_to_folderitems`` to rebuild the originating folder.
"""
self.fold(folder)
new_folder = self.__chains[:]
self.__chains = items
self.__chains[index] = new_folder[0]
return None
@staticmethod
def _dupes_in_chainref(chain_refs):
return len(set(chain_refs)) != len(chain_refs)
def _check_equality(self, other, return_diffs=True):
"""
"""
chains1 = self.chains
chains2 = other.chains
diffs = {}
if not len(chains1) == len(chains2):
return False
else:
paired = list(zip(chains1, chains2))
for c1, c2 in paired:
atts1 = c1.__dict__
atts2 = c2.__dict__
for att in list(atts1.keys()):
if isinstance(atts1[att], (pd.DataFrame, pd.Index)):
if not atts1[att].equals(atts2[att]):
diffs[att] = [atts1[att], atts2[att]]
else:
if atts1[att] != atts2[att]:
diffs[att] = [atts1[att], atts2[att]]
return diffs if return_diffs else not diffs
def _test_same_structure(self, other):
"""
"""
folders1 = self.folders
singles1 = self.singles
folders2 = other.folders
singles2 = other.singles
if (folders1 != folders2 or singles1 != singles2):
return False
else:
return True
def equals(self, other):
"""
Test equality of self to another ``ChainManager`` object instance.
.. note::
Only the flattened list of ``Chain`` objects stored are tested, i.e.
any folder structure differences are ignored. Use ``compare()`` for
a more detailed comparison.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
Returns
-------
equality : bool
"""
return self._check_equality(other, False)
def compare(self, other, strict=True, full_summary=True):
"""
Compare structure and content of self to another ``ChainManager`` instance.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
strict : bool, default True
Test if the structure of folders vs. single Chain objects is the
same in both ChainManager instances.
full_summary : bool, default True
``False`` will disable the detailed comparison ``pd.DataFrame``
that informs about differences between the objects.
Returns
-------
None
A brief feedback message about the comparison results is printed out.
"""
diffs = []
if strict:
same_structure = self._test_same_structure(other)
if not same_structure:
diffs.append('s')
check = self._check_equality(other)
if isinstance(check, bool):
diffs.append('l')
else:
if check: diffs.append('c')
report_full = ['_frame', '_x_keys', '_y_keys', 'index', '_columns',
'base_descriptions', 'annotations']
diffs_in = ''
if diffs:
if 'l' in diffs:
diffs_in += '\n -Length (number of stored Chain objects)'
if 's' in diffs:
diffs_in += '\n -Structure (folders and/or single Chain order)'
if 'c' in diffs:
diffs_in += '\n -Chain elements (properties and content of Chain objects)'
if diffs_in:
result = 'ChainManagers are not identical:\n'
result += '--------------------------------' + diffs_in
else:
result = 'ChainManagers are identical.'
print(result)
return None
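# Comparison sketch (hypothetical, assuming two populated ChainManager
# instances ``cm_a`` and ``cm_b`` built from the same stack):
#
#   >>> cm_a.equals(cm_b)      # flat equality check of the stored Chain items -> bool
#   >>> cm_a.compare(cm_b)     # verbose structural comparison, prints a report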
def save(self, path, keep_stack=False):
"""
"""
if not keep_stack:
del self.stack
self.stack = None
f = open(path, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
return None
@staticmethod
def load(path):
"""
"""
f = open(path, 'rb')
obj = pickle.load(f)
f.close()
return obj
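# Persistence sketch (hypothetical path): ``save`` pickles the instance and,
# unless ``keep_stack=True`` is passed, drops the heavy stack reference first.
#
#   >>> cm.save('./chains.cm')
#   >>> restored = ChainManager.load('./chains.cm')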
def _toggle_vis(self, chains, mode='hide'):
if not isinstance(chains, list): chains = [chains]
for chain in chains:
if isinstance(chain, dict):
fname = list(chain.keys())[0]
elements = list(chain.values())[0]
fidx = self._idx_from_name(fname)
folder = self[fidx][fname]
for c in folder:
if c.name in elements:
c.hidden = True if mode == 'hide' else False
if mode == 'hide' and not c.name in self._hidden:
self._hidden.append(c.name)
if mode == 'unhide' and c.name in self._hidden:
self._hidden.remove(c.name)
else:
if chain in self.folder_names:
for c in self[chain]:
c.hidden = True if mode == 'hide' else False
else:
self[chain].hidden = True if mode == 'hide' else False
if mode == 'hide':
if not chain in self._hidden:
self._hidden.append(chain)
else:
if chain in self._hidden:
self._hidden.remove(chain)
return None
def hide(self, chains):
"""
Flag elements as being hidden.
Parameters
----------
chains : (list) of int and/or str or dict
The ``qp.Chain`` item and/or folder names to hide. To hide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name.
Returns
-------
None
"""
self._toggle_vis(chains, 'hide')
return None
def unhide(self, chains=None):
"""
Unhide elements that have been set as ``hidden``.
Parameters
----------
chains : (list) of int and/or str or dict, default None
The ``qp.Chain`` item and/or folder names to unhide. To unhide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name. If not provided, all hidden elements will be unhidden.
Returns
-------
None
"""
if not chains: chains = self.folder_names + self.single_names
self._toggle_vis(chains, 'unhide')
return None
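# Visibility sketch (hypothetical chain/folder names, assuming a populated
# ChainManager ``cm``): hide a single item and one item inside a folder,
# inspect the flags, then lift them all again.
#
#   >>> cm.hide(['q1', {'Demographics': ['gender']}])
#   >>> cm.hidden        # names of all flagged Chain items
#   >>> cm.unhide()      # remove every hide flag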
def clone(self):
"""
Return a full (deep) copy of self.
"""
return copy.deepcopy(self)
def insert(self, other_cm, index=-1, safe_names=False):
"""
Add elements from another ``ChainManager`` instance to self.
Parameters
----------
other_cm : ``quantipy.ChainManager``
A ChainManager instance to draw the elements from.
index : int, default -1
The positional index after which new elements will be added.
Defaults to -1, i.e. elements are appended at the end.
safe_names : bool, default False
If True and any duplicated element names are found after the
operation, names will be made unique (by appending '_1', '_2', '_3',
etc.).
Returns
-------
None
"""
if not isinstance(other_cm, ChainManager):
raise ValueError("other_cm must be a quantipy.ChainManager instance.")
if not index == -1:
before_c = self.__chains[:index+1]
after_c = self.__chains[index+1:]
new_chains = before_c + other_cm.__chains + after_c
self.__chains = new_chains
else:
self.__chains.extend(other_cm.__chains)
if safe_names: self._uniquify_names()
return None
def merge(self, folders, new_name=None, drop=True):
"""
Unite the items of two or more folders, optionally providing a new name.
If duplicated ``qp.Chain`` items are found, the first instance will be
kept. The merged folder will take the place of the first folder named
in ``folders``.
Parameters
----------
folders : list of int and/or str
The folders to merge referenced by their positional index or by name.
new_name : str, default None
Use this as the merged folder's name. If not provided, the name
of the first folder in ``folders`` will be used instead.
drop : bool, default True
If ``False``, the original folders will be kept alongside the
new merged one.
Returns
-------
None
"""
if not isinstance(folders, list):
err = "'folders' must be a list of folder references!"
raise TypeError(err)
if len(folders) == 1:
err = "'folders' must contain at least two folder names!"
raise ValueError(err)
if not all(self._is_folder_ref(f) for f in folders):
err = "One or more folder names from 'folders' do not exist!"
raise ValueError(err)
folders = [f if isinstance(f, str) else self._name_from_idx(f)
for f in folders]
folder_idx = self._idx_from_name(folders[0])
if not new_name: new_name = folders[0]
merged_items = []
seen_names = []
for folder in folders:
for chain in self[folder]:
if not chain.name in seen_names:
merged_items.append(chain)
seen_names.append(chain.name)
if drop:
self.__chains[folder_idx] = {new_name: merged_items}
remove_folders = folders[1:] if new_name != folders[0] else folders
for r in remove_folders:
self.remove(r)
else:
start = self.__chains[:folder_idx]
end = self.__chains[folder_idx:]
self.__chains = start + [{new_name: merged_items}] + end
return None
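# Folder merging sketch (hypothetical folder names): the merged folder takes the
# place of the first one listed and, with ``drop=True``, the remaining source
# folders are removed afterwards.
#
#   >>> cm.merge(['Wave 1', 'Wave 2'], new_name='All waves')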
def fold(self, folder_name=None, chains=None):
"""
Arrange non-``dict`` structured ``qp.Chain`` items in folders.
All separate ``qp.Chain`` items will be mapped to their ``name``
property being the ``key`` in the transformed ``dict`` structure.
Parameters
----------
folder_name : str, default None
Collect all items in a folder keyed by the provided name. If the
key already exists, the items will be appended to the ``dict``
values.
chains : (list) of int and/or str, default None
Select specific ``qp.Chain`` items by providing their positional
indices or ``name`` property value for moving only a subset to the
folder.
Returns
-------
None
"""
if chains:
if not isinstance(chains, list): chains = [chains]
if any(self._is_folder_ref(c) for c in chains):
err = 'Cannot build folder from other folders!'
raise ValueError(err)
all_chain_names = []
singles = []
for c in chains:
if isinstance(c, str):
all_chain_names.append(c)
elif isinstance(c, int) and c in self._idx_to_singles():
all_chain_names.append(self._idx_to_singles()[c])
for c in all_chain_names:
singles.append(self[self._singles_to_idx()[c]])
else:
singles = [s for s in self if isinstance(s, Chain)]
if self._dupes_in_chainref(singles):
err = "Cannot build folder from duplicate qp.Chain references: {}"
raise ValueError(err.format(singles))
for s in singles:
if folder_name:
if folder_name in self.folder_names:
self[folder_name].append(s)
else:
self.__chains.append({folder_name: [s]})
del self.__chains[self._singles_to_idx()[s.name]]
else:
self.__chains[self._singles_to_idx()[s.name]] = {s.name: [s]}
return None
def unfold(self, folder=None):
"""
Remove folder but keep the collected items.
The items will be added starting at the old index position of the
original folder.
Parameters
----------
folder : (list of) str, default None
The name of the folder to drop and extract items from. If not
provided all folders will be unfolded.
Returns
-------
None
"""
if not folder:
folder = self.folder_names
else:
if not isinstance(folder, list): folder = [folder]
invalid = [f for f in folder if f not in self.folder_names]
if invalid:
err = "Folder(s) named '{}' not found.".format(invalid)
raise KeyError(err)
for f in folder:
old_pos = self._idx_from_name(f)
items = self[f]
start = self.__chains[: old_pos]
end = self.__chains[old_pos + 1: ]
self.__chains = start + items + end
return None
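# Folding sketch (hypothetical names): ``fold`` wraps loose Chain items into a
# dict-style folder, ``unfold`` flattens the folder again at its old position.
#
#   >>> cm.fold('Banner A', chains=['q1', 'q2'])
#   >>> cm.unfold('Banner A')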
def remove(self, chains, folder=None, inplace=True):
"""
Remove (folders of) ``qp.Chain`` items by providing a list of indices
or names.
Parameters
----------
chains : (list) of int and/or str
``qp.Chain`` items or folders by provided by their positional
indices or ``name`` property.
folder : str, default None
If a folder name is provided, items will be dropped within that
folder only instead of removing all found instances.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = cm._set_to_folderitems(folder)
if not isinstance(chains, list): chains = [chains]
remove_idxs= [c if isinstance(c, int) else cm._idx_from_name(c)
for c in chains]
if cm._dupes_in_chainref(remove_idxs):
err = "Cannot remove with duplicate chain references: {}"
raise ValueError(err.format(remove_idxs))
new_items = []
for pos, c in enumerate(cm):
if not pos in remove_idxs: new_items.append(c)
cm.__chains = new_items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
def cut(self, values, ci=None, base=False, tests=False):
"""
Isolate selected axis values in the ``Chain.dataframe``.
Parameters
----------
values : (list of) str
The string must indicate the raw (i.e. the unpainted) second level
axis value, e.g. ``'mean'``, ``'net_1'``, etc.
ci : {'counts', 'c%', None}, default None
The cell item version to target if multiple frequency representations
are present.
base : bool, default False
Controls keeping any existing base view aggregations.
tests : bool, default False
Controls keeping any existing significance test view aggregations.
Returns
-------
None
"""
if not isinstance(values, list): values = [values]
if 'cbase' in values:
values[values.index('cbase')] = 'All'
if base and not 'All' in values:
values = ['All'] + values
for c in self.chains:
# force ci parameter for proper targeting on array summaries...
if c.array_style == 0 and ci is None:
_ci = c.cell_items.split('_')[0]
if not _ci.startswith('counts'):
ci = 'c%'
else:
ci = 'counts'
if c.sig_test_letters: c._remove_letter_header()
idxs, names, order = c._view_idxs(
values, keep_tests=tests, keep_bases=base, names=True, ci=ci)
idxs = [i for _, i in sorted(zip(order, idxs))]
names = [n for _, n in sorted(zip(order, names))]
if c.ci_count > 1: c._non_grouped_axis()
if c.array_style == 0:
c._fill_cells()
start, repeat = c._row_pattern(ci)
c._frame = c._frame.iloc[start::repeat, idxs]
else:
c._frame = c._frame.iloc[idxs, :]
c.index = c._slice_edited_index(c.index, idxs)
new_views = OrderedDict()
for v in c.views.copy():
if not v in names:
del c._views[v]
else:
c._views[v] = names.count(v)
if not c._array_style == 0:
if not tests:
c.sig_test_letters = None
else:
c._frame = c._apply_letter_header(c._frame)
c.edited = True
return None
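# Cutting sketch (hypothetical view value names): keep only the net and mean
# rows of every Chain, retaining base rows but dropping significance tests.
#
#   >>> cm.cut(['net_1', 'mean'], ci='c%', base=True, tests=False)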
def join(self, title='Summary'):
"""
Join **all** ``qp.Chain`` elements, concatenating along the matching axis.
Parameters
----------
title : {str, 'auto'}, default 'Summary'
The new title for the joined axis' index representation.
Returns
-------
None
"""
custom_views = []
self.unfold()
chains = self.chains
totalmul = len(chains[0]._frame.columns.get_level_values(0).tolist())
concat_dfs = []
new_labels = []
for c in chains:
new_label = []
if c.sig_test_letters:
c._remove_letter_header()
c._frame = c._apply_letter_header(c._frame)
df = c.dataframe
if not c.array_style == 0:
new_label.append(df.index.get_level_values(0).values.tolist()[0])
new_label.extend((len(c.describe()) - 1) * [''])
else:
new_label.extend(df.index.get_level_values(1).values.tolist())
names = ['Question', 'Values']
join_idx = pd.MultiIndex.from_product([[title], new_label], names=names)
df.index = join_idx
df.rename(columns={c._x_keys[0]: 'Total'}, inplace=True)
if not c.array_style == 0:
custom_views.extend(c._views_per_rows())
else:
df.columns.set_levels(levels=[title]*totalmul, level=0, inplace=True)
concat_dfs.append(df)
new_df = pd.concat(concat_dfs, axis=0, join='inner')
self.chains[0]._frame = new_df
self.reorder([0])
self.rename({self.single_names[0]: title})
self.fold()
self.chains[0]._custom_views = custom_views
return None
def reorder(self, order, folder=None, inplace=True):
"""
Reorder (folders of) ``qp.Chain`` items by providing a list of new
indices or names.
Parameters
----------
order : list of int and/or str
The folder or ``qp.Chain`` references to determine the new order
of items. Any items not referenced will be removed from the new
order.
folder : str, default None
If a folder name is provided, items will be sorted within that
folder instead of applying the sorting to the general items
collection.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = self._set_to_folderitems(folder)
if not isinstance(order, list):
err = "'order' must be a list!"
raise ValueError(err)
new_idx_order = []
for o in order:
if isinstance(o, int):
new_idx_order.append(o)
else:
new_idx_order.append(self._idx_from_name(o))
if cm._dupes_in_chainref(new_idx_order):
err = "Cannot reorder from duplicate qp.Chain references: {}"
raise ValueError(err.format(new_idx_order))
items = [self.__chains[idx] for idx in new_idx_order]
cm.__chains = items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
def rename(self, names, folder=None):
"""
Rename (folders of) ``qp.Chain`` items by providing a mapping of old
to new keys.
Parameters
----------
names : dict
Maps existing names to the desired new ones, i.e.
{'old name': 'new names'} pairs need to be provided.
folder : str, default None
If a folder name is provided, new names will only be applied
within that folder. This is without effect if all ``qp.Chain.name``
properties across the items are unique.
Returns
-------
None
"""
if not isinstance(names, dict):
err = "''names' must be a dict of old_name: new_name pairs."
raise ValueError(err)
if folder and not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
for old, new in list(names.items()):
no_folder_name = folder and not old in self._names(False)
no_name_across = not folder and not old in self._names(True)
if no_folder_name and no_name_across:
err = "'{}' is not an existing folder or ``qp.Chain`` name!"
raise KeyError(err.format(old))
else:
within_folder = old not in self._names(False)
if not within_folder:
idx = self._idx_from_name(old)
if not isinstance(self.__chains[idx], dict):
self.__chains[idx].name = new
else:
self.__chains[idx] = {new: self[old][:]}
else:
iter_over = self[folder] if folder else self.chains
for c in iter_over:
if c.name == old: c.name = new
return None
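# Reordering/renaming sketch (hypothetical names): items not referenced in
# ``order`` are dropped from the new arrangement, so list everything that
# should stay.
#
#   >>> cm.reorder(['q3', 'q1', 'Demographics'])
#   >>> cm.rename({'q3': 'Q3 - awareness'})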
def _native_stat_names(self, idxvals_list, text_key=None):
"""
"""
if not text_key: text_key = 'en-GB'
replacements = {
'en-GB': {
'Weighted N': 'Base', # Crunch
'N': 'Base', # Crunch
'Mean': 'Mean', # Dims
'StdDev': 'Std. dev', # Dims
'StdErr': 'Std. err. of mean', # Dims
'SampleVar': 'Sample variance' # Dims
},
}
native_stat_names = []
for val in idxvals_list:
if val in replacements[text_key]:
native_stat_names.append(replacements[text_key][val])
else:
native_stat_names.append(val)
return native_stat_names
def _get_ykey_mapping(self):
ys = []
letters = string.ascii_uppercase + string.ascii_lowercase
for c in self.chains:
if c._y_keys not in ys:
ys.append(c._y_keys)
return list(zip(ys, letters))
def describe(self, by_folder=False, show_hidden=False):
"""
Get a structural summary of all ``qp.Chain`` instances found in self.
Parameters
----------
by_folder : bool, default False
If True, only information on ``dict``-structured (folder-like)
``qp.Chain`` items is shown, multiindexed by folder names and item
enumerations.
show_hidden : bool, default False
If True, the summary will also include elements that have been set
hidden using ``self.hide()``.
Returns
-------
summary : pd.DataFrame
A structural summary of the collection's elements.
"""
folders = []
folder_items = []
variables = []
names = []
array_sum = []
sources = []
banner_ids = []
item_pos = []
hidden = []
bannermap = self._get_ykey_mapping()
for pos, chains in enumerate(self):
is_folder = isinstance(chains, dict)
if is_folder:
folder_name = list(chains.keys())
chains = list(chains.values())[0]
folder_items.extend(list(range(0, len(chains))))
item_pos.extend([pos] * len(chains))
else:
chains = [chains]
folder_name = [None]
folder_items.append(None)
item_pos.append(pos)
if chains[0].structure is None:
variables.extend([c._x_keys[0] for c in chains])
names.extend([c.name for c in chains])
folders.extend(folder_name * len(chains))
array_sum.extend([True if c.array_style > -1 else False
for c in chains])
sources.extend(c.source if not c.edited else 'edited'
for c in chains)
for c in chains:
for m in bannermap:
if m[0] == c._y_keys: banner_ids.append(m[1])
else:
variables.extend([chains[0].name])
names.extend([chains[0].name])
folders.extend(folder_name)
array_sum.extend([False])
sources.extend(c.source for c in chains)
banner_ids.append(None)
for c in chains:
if c.hidden:
hidden.append(True)
else:
hidden.append(False)
df_data = [item_pos,
names,
folders,
folder_items,
variables,
sources,
banner_ids,
array_sum,
hidden]
df_cols = ['Position',
'Name',
'Folder',
'Item',
'Variable',
'Source',
'Banner id',
'Array',
'Hidden']
df = pd.DataFrame(df_data).T
df.columns = df_cols
if by_folder:
df = df.set_index(['Position', 'Folder', 'Item'])
if not show_hidden:
df = df[df['Hidden'] == False][df.columns[:-1]]
return df
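# Inspection sketch: ``describe`` returns a pd.DataFrame overview of the stored
# items; grouping by folder and including hidden elements is optional.
#
#   >>> cm.describe()
#   >>> cm.describe(by_folder=True, show_hidden=True)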
def from_mtd(self, mtd_doc, ignore=None, paint=True, flatten=False):
"""
Convert a Dimensions table document (.mtd) into a collection of
quantipy.Chain representations.
Parameters
----------
mtd_doc : (pandified) .mtd
A Dimensions .mtd file or the returned result of ``pandify_mtd()``.
A "pandified" .mtd consists of ``dict`` of ``pandas.DataFrame``
and metadata ``dict``. Additional text here...
ignore : bool, default False
Text
paint : bool, default True
Text
flatten : bool, default False
Text
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the pandas-converted
.mtd file.
"""
def relabel_axes(df, meta, sigtested, labels=True):
"""
"""
for axis in ['x', 'y']:
if axis == 'x':
transf_axis = df.index
else:
transf_axis = df.columns
levels = transf_axis.nlevels
axis_meta = 'index-emetas' if axis == 'x' else 'columns-emetas'
for l in range(0, levels):
if not (sigtested and axis == 'y' and l == levels -1):
org_vals = transf_axis.get_level_values(l).tolist()
org_names = [ov.split('|')[0] for ov in org_vals]
org_labs = [ov.split('|')[1] for ov in org_vals]
new_vals = org_labs if labels else org_names
if l > 0:
for no, axmeta in enumerate(meta[axis_meta]):
if axmeta['Type'] != 'Category':
new_vals[no] = axmeta['Type']
new_vals = self._native_stat_names(new_vals)
rename_dict = {old: new for old, new in zip(org_vals, new_vals)}
if axis == 'x':
df.rename(index=rename_dict, inplace=True)
df.index.names = ['Question', 'Values'] * (levels // 2)
else:
df.rename(columns=rename_dict, inplace=True)
if sigtested:
df.columns.names = (['Question', 'Values'] * (levels // 2) +
['Test-IDs'])
else:
df.columns.names = ['Question', 'Values'] * (levels // 2)
return None
def split_tab(tab):
"""
"""
df, meta = tab['df'], tab['tmeta']
mtd_slicer = df.index.get_level_values(0)
meta_limits = list(OrderedDict(
(i, mtd_slicer.tolist().count(i)) for i in mtd_slicer).values())
meta_slices = []
for start, end in enumerate(meta_limits):
if start == 0:
i_0 = 0
else:
i_0 = meta_limits[start-1]
meta_slices.append((i_0, end))
df_slicers = []
for e in mtd_slicer:
if not e in df_slicers:
df_slicers.append(e)
dfs = [df.loc[[s], :].copy() for s in df_slicers]
sub_metas = []
for ms in meta_slices:
all_meta = copy.deepcopy(meta)
idx_meta = all_meta['index-emetas'][ms[0]: ms[1]]
all_meta['index-emetas'] = idx_meta
sub_metas.append(all_meta)
return list(zip(dfs, sub_metas))
def _get_axis_vars(df):
axis_vars = []
for axis in [df.index, df.columns]:
ax_var = [v.split('|')[0] for v in axis.unique().levels[0]]
axis_vars.append(ax_var)
return axis_vars[0][0], axis_vars[1]
def to_chain(basic_chain_defintion, add_chain_meta):
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Dimensions MTD'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
# new_chain._pad_id = None
# new_chain._array_style = None
new_chain._has_rules = False
# new_chain.double_base = False
# new_chain.sig_test_letters = None
# new_chain.totalize = True
# new_chain._meta['var_meta'] = basic_chain_defintion[-1]
# new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
new_chain._views_per_rows()
for vk in new_chain._views_per_rows():
if not vk in new_chain._views:
new_chain._views[vk] = new_chain._views_per_rows().count(vk)
return new_chain
def mine_mtd(tab_collection, paint, chain_coll, folder=None):
failed = []
unsupported = []
for name, sub_tab in list(tab_collection.items()):
try:
if isinstance(list(sub_tab.values())[0], dict):
mine_mtd(sub_tab, paint, chain_coll, name)
else:
tabs = split_tab(sub_tab)
chain_dfs = []
for tab in tabs:
df, meta = tab[0], tab[1]
nested_x = None
nested_y = (df.columns.nlevels % 2 == 0
and df.columns.nlevels > 2)
sigtested = (df.columns.nlevels % 2 != 0
and df.columns.nlevels > 2)
if sigtested:
df = df.swaplevel(0, axis=1).swaplevel(0, 1, 1)
else:
invalid = ['-', '*', '**']
df = df.applymap(
lambda x: float(x.replace(',', '.').replace('%', ''))
if isinstance(x, str) and not x in invalid
else x
)
x, y = _get_axis_vars(df)
df.replace('-', np.NaN, inplace=True)
relabel_axes(df, meta, sigtested, labels=paint)
colbase_l = -2 if sigtested else -1
for base in ['Base', 'UnweightedBase']:
df = df.drop(base, axis=1, level=colbase_l)
chain = to_chain((df, x, y), meta)
chain.name = name
chain_dfs.append(chain)
if not folder:
chain_coll.extend(chain_dfs)
else:
folders = [(i, list(c.keys())[0]) for i, c in
enumerate(chain_coll, 0) if
isinstance(c, dict)]
if folder in [f[1] for f in folders]:
pos = [f[0] for f in folders
if f[1] == folder][0]
chain_coll[pos][folder].extend(chain_dfs)
else:
chain_coll.append({folder: chain_dfs})
except Exception:
failed.append(name)
return chain_coll
chain_coll = []
chains = mine_mtd(mtd_doc, paint, chain_coll)
self.__chains = chains
return self
def from_cmt(self, crunch_tabbook, ignore=None, cell_items='c',
array_summaries=True):
"""
Convert a Crunch multitable document (tabbook) into a collection of
quantipy.Chain representations.
Parameters
----------
crunch_tabbook : ``Tabbook`` object instance
Text
ignore : bool, default False
Text
cell_items : {'c', 'p', 'cp'}, default 'c'
Text
array_summaries : bool, default True
Text
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the Crunch table
document.
"""
def cubegroups_to_chain_defs(cubegroups, ci, arr_sum):
"""
Convert CubeGroup DataFrame to a Chain.dataframe.
"""
chain_dfs = []
# DataFrame edits to get basic Chain.dataframe rep.
for idx, cubegroup in enumerate(cubegroups):
cubegroup_df = cubegroup.dataframe
array = cubegroup.is_array
# split arrays into separate dfs / convert to summary df...
if array:
ai_aliases = cubegroup.subref_aliases
array_elements = []
dfs = []
if array_summaries:
arr_sum_df = cubegroup_df.copy().unstack()['All']
arr_sum_df.is_summary = True
x_label = arr_sum_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs.append((arr_sum_df, x_label, x_name))
array_elements = cubegroup_df.index.levels[1].values.tolist()
ai_df = cubegroup_df.copy()
idx = cubegroup_df.index.droplevel(0)
ai_df.index = idx
for array_element, alias in zip(array_elements, ai_aliases):
dfs.append((ai_df.loc[[array_element], :].copy(),
array_element, alias))
else:
x_label = cubegroup_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs = [(cubegroup_df, x_label, x_name)]
# Apply QP-style DataFrame conventions (indexing, names, etc.)
for cgdf, x_var_label, x_var_name in dfs:
is_summary = hasattr(cgdf, 'is_summary')
if is_summary:
cgdf = cgdf.T
y_var_names = ['@']
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
y_var_names = cubegroup.colvars
x_names = ['Question', 'Values']
y_names = ['Question', 'Values']
cgdf.index = cgdf.index.droplevel(0)
# Compute percentages?
if cell_items == 'p': _calc_pct(cgdf)
# Build x-axis multiindex / rearrange "Base" row
idx_vals = cgdf.index.values.tolist()
cgdf = cgdf.reindex([idx_vals[-1]] + idx_vals[:-1])
idx_vals = cgdf.index.values.tolist()
mi_vals = [[x_var_label], self._native_stat_names(idx_vals)]
row_mi = pd.MultiIndex.from_product(mi_vals, names=x_names)
cgdf.index = row_mi
# Build y-axis multiindex
y_vals = [('Total', 'Total') if y[0] == 'All'
else y for y in cgdf.columns.tolist()]
col_mi = pd.MultiIndex.from_tuples(y_vals, names=y_names)
cgdf.columns = col_mi
if is_summary:
cgdf = cgdf.T
chain_dfs.append((cgdf, x_var_name, y_var_names, cubegroup._meta))
return chain_dfs
def _calc_pct(df):
df.iloc[:-1, :] = df.iloc[:-1, :].div(df.iloc[-1, :]) * 100
return None
def to_chain(basic_chain_defintion, add_chain_meta):
"""
"""
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Crunch multitable'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
new_chain._pad_id = None
new_chain._array_style = None
new_chain._has_rules = False
new_chain.double_base = False
new_chain.sig_test_letters = None
new_chain.totalize = True
new_chain._meta['var_meta'] = basic_chain_defintion[-1]
new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
for vk in new_chain._views_per_rows():
if not vk in new_chain._views:
new_chain._views[vk] = new_chain._views_per_rows().count(vk)
return new_chain
# self.name = name OK!
# self._meta = Crunch meta OK!
# self._x_keys = None OK!
# self._y_keys = None OK!
# self._frame = None OK!
# self.totalize = False OK! -> But is True!
# self.stack = stack OK! -> N/A
# self._has_rules = None OK! -> N/A
# self.double_base = False OK! -> N/A
# self.sig_test_letters = None OK! -> N/A
# self._pad_id = None OK! -> N/A
# self._given_views = None OK! -> N/A
# self._grp_text_map = [] OK! -> N/A
# self._text_map = None OK! -> N/A
# self.grouping = None ?
# self._group_style = None ?
# self._transl = qp.core.view.View._metric_name_map() * with CMT/MTD
self.source = 'Crunch multitable'
cubegroups = crunch_tabbook.cube_groups
meta = {'display_settings': crunch_tabbook.display_settings,
'weight': crunch_tabbook.weight}
if cell_items == 'c':
meta['display_settings']['countsOrPercents'] = 'counts'
elif cell_items == 'p':
meta['display_settings']['countsOrPercents'] = 'percent'
chain_defs = cubegroups_to_chain_defs(cubegroups, cell_items,
array_summaries)
self.__chains = [to_chain(c_def, meta) for c_def in chain_defs]
return self
# ------------------------------------------------------------------------
def from_cluster(self, clusters):
"""
Create an OrderedDict of ``Cluster`` names storing new ``Chain`` items.
Parameters
----------
clusters : cluster-like ([dict of] quantipy.Cluster)
Text ...
Returns
-------
new_chain_dict : OrderedDict
Text ...
"""
self.source = 'native (old qp.Cluster of qp.Chain)'
qp.set_option('new_chains', True)
def check_cell_items(views):
c = any('counts' in view.split('|')[-1] for view in views)
p = any('c%' in view.split('|')[-1] for view in views)
cp = c and p
if cp:
cell_items = 'counts_colpct'
else:
cell_items = 'counts' if c else 'colpct'
return cell_items
def check_sigtest(views):
"""
"""
levels = []
sigs = [v.split('|')[1] for v in views if v.split('|')[1].startswith('t.')]
for sig in sigs:
l = '0.{}'.format(sig.split('.')[-1])
if not l in levels: levels.append(l)
return levels
def mine_chain_structure(clusters):
cluster_defs = []
for cluster_def_name, cluster in list(clusters.items()):
for name in cluster:
if isinstance(list(cluster[name].items())[0][1], pd.DataFrame):
cluster_def = {'name': name,
'oe': True,
'df': list(cluster[name].items())[0][1],
'filter': chain.filter,
'data_key': chain.data_key}
else:
xs, views, weight = [], [], []
for chain_name, chain in list(cluster[name].items()):
for v in chain.views:
w = v.split('|')[-2]
if w not in weight: weight.append(w)
if v not in views: views.append(v)
xs.append(chain.source_name)
ys = chain.content_of_axis
cluster_def = {'name': '{}-{}'.format(cluster_def_name, name),
'filter': chain.filter,
'data_key': chain.data_key,
'xs': xs,
'ys': ys,
'views': views,
'weight': weight[-1],
'bases': 'both' if len(weight) == 2 else 'auto',
'cell_items': check_cell_items(views),
'tests': check_sigtest(views)}
cluster_defs.append(cluster_def)
return cluster_defs
from quantipy.core.view_generators.view_specs import ViewManager
cluster_specs = mine_chain_structure(clusters)
for cluster_spec in cluster_specs:
oe = cluster_spec.get('oe', False)
if not oe:
vm = ViewManager(self.stack)
vm.get_views(cell_items=cluster_spec['cell_items'],
weight=cluster_spec['weight'],
bases=cluster_spec['bases'],
stats= ['mean', 'stddev', 'median', 'min', 'max'],
tests=cluster_spec['tests'])
self.get(data_key=cluster_spec['data_key'],
filter_key=cluster_spec['filter'],
x_keys = cluster_spec['xs'],
y_keys = cluster_spec['ys'],
views=vm.views,
orient='x',
prioritize=True)
else:
meta = [cluster_spec['data_key'], cluster_spec['filter']]
df, name = cluster_spec['df'], cluster_spec['name']
self.add(df, meta_from=meta, name=name)
return None
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
def _check_keys(self, data_key, keys):
""" Checks given keys exist in meta['columns']
"""
keys = self._force_list(keys)
meta = self.stack[data_key].meta
valid = list(meta['columns'].keys()) + list(meta['masks'].keys())
invalid = ['"%s"' % _ for _ in keys if _ not in valid and _ != _TOTAL]
if invalid:
raise ValueError("Keys %s do not exist in meta['columns'] or "
"meta['masks']." % ", ".join(invalid))
return keys
def add(self, structure, meta_from=None, meta=None, name=None):
""" Add a pandas.DataFrame as a Chain.
Parameters
----------
structure : ``pandas.Dataframe``
The dataframe to add to the ChainManager
meta_from : list, list-like, str, default None
The location of the meta in the stack. Either a list-like object with data key and
filter key or a str as the data key
meta : quantipy meta (dict)
External meta used to paint the frame
name : ``str``, default None
The name to give the resulting chain. If not passed, the name will become
the concatenated column names, delimited by a period
Returns
-------
appended : ``quantipy.ChainManager``
"""
name = name or '.'.join(structure.columns.tolist())
chain = Chain(self.stack, name, structure=structure)
chain._frame = chain.structure
chain._index = chain._frame.index
chain._columns = chain._frame.columns
chain._frame_values = chain._frame.values
if meta_from:
if isinstance(meta_from, str):
chain._meta = self.stack[meta_from].meta
else:
data_key, filter_key = meta_from
chain._meta = self.stack[data_key][filter_key].meta
elif meta:
chain._meta = meta
self.__chains.append(chain)
return self
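# Adding a plain DataFrame sketch (hypothetical data key and frame): open-ended
# or otherwise untabulated data can be stored as a structure-only Chain.
#
#   >>> oe_df = pd.DataFrame({'respondent': [1, 2], 'verbatim': ['good', 'bad']})
#   >>> cm.add(oe_df, meta_from=['mydata', 'no_filter'], name='open ends')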
def get(self, data_key, filter_key, x_keys, y_keys, views, orient='x',
rules=True, rules_weight=None, prioritize=True, folder=None):
"""
TODO: Full doc string
Get a (list of) Chain instance(s) in either 'x' or 'y' orientation.
Chain.dfs will be concatenated along the provided 'orient'-axis.
"""
# TODO: VERIFY data_key
# TODO: VERIFY filter_key
# TODO: Add verbose arg to get()
x_keys = self._check_keys(data_key, x_keys)
y_keys = self._check_keys(data_key, y_keys)
if folder and not isinstance(folder, str):
err = "'folder' must be a name provided as string!"
raise ValueError(err)
if orient == 'x':
it, keys = x_keys, y_keys
else:
it, keys = y_keys, x_keys
for key in it:
x_key, y_key = (key, keys) if orient == 'x' else (keys, key)
chain = Chain(self.stack, key)
chain = chain.get(data_key, filter_key, self._force_list(x_key),
self._force_list(y_key), views, rules=rules,
rules_weight=rules_weight, prioritize=prioritize,
orient=orient)
folders = self.folder_names
if folder in folders:
idx = self._idx_from_name(folder)
self.__chains[idx][folder].append(chain)
else:
if folder:
self.__chains.append({folder: [chain]})
else:
self.__chains.append(chain)
return None
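# Retrieval sketch (hypothetical stack keys and view keys): build x-oriented
# Chains against a banner of y-variables and file them into a folder in one go.
#
#   >>> cm.get(data_key='mydata', filter_key='no_filter',
#   ...        x_keys=['q1', 'q2'], y_keys=['@', 'gender'],
#   ...        views=['x|f|:|||counts'], folder='Main tables')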
def paint_all(self, *args, **kwargs):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or Power-
point Build.
Parameters
----------
text_key : str, default meta['lib']['default text']
The language version of any variable metadata applied.
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.index`` labels
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.columns`` labels
display : {'x', 'y', ['x', 'y']}, default None
Text
axes : {'x', 'y', ['x', 'y']}, default None
Text
view_level : bool, default False
Text
transform_tests : {False, 'full', 'cells'}, default cells
Text
totalize : bool, default False
Text
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
for chain in self:
if isinstance(chain, dict):
for c in list(chain.values())[0]:
c.paint(*args, **kwargs)
else:
chain.paint(*args, **kwargs)
return None
HEADERS = ['header-title',
'header-left',
'header-center',
'header-right']
FOOTERS = ['footer-title',
'footer-left',
'footer-center',
'footer-right']
VALID_ANNOT_TYPES = HEADERS + FOOTERS + ['notes']
VALID_ANNOT_CATS = ['header', 'footer', 'notes']
VALID_ANNOT_POS = ['title',
'left',
'center',
'right']
class ChainAnnotations(dict):
def __init__(self):
super(ChainAnnotations, self).__init__()
self.header_title = []
self.header_left = []
self.header_center = []
self.header_right = []
self.footer_title = []
self.footer_left = []
self.footer_center = []
self.footer_right = []
self.notes = []
for v in VALID_ANNOT_TYPES:
self[v] = []
def __setitem__(self, key, value):
self._test_valid_key(key)
return super(ChainAnnotations, self).__setitem__(key, value)
def __getitem__(self, key):
self._test_valid_key(key)
return super(ChainAnnotations, self).__getitem__(key)
def __repr__(self):
headers = [(h.split('-')[1], self[h]) for h in self.populated if
h.split('-')[0] == 'header']
footers = [(f.split('-')[1], self[f]) for f in self.populated if
f.split('-')[0] == 'footer']
notes = self['notes'] if self['notes'] else []
if notes:
ar = 'Notes\n'
ar += '-{:>16}\n'.format(str(notes))
else:
ar = 'Notes: None\n'
if headers:
ar += 'Headers\n'
for pos, text in list(dict(headers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Headers: None\n'
if footers:
ar += 'Footers\n'
for pos, text in list(dict(footers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Footers: None'
return ar
def _test_valid_key(self, key):
"""
"""
if key not in VALID_ANNOT_TYPES:
splitted = key.split('-')
if len(splitted) > 1:
acat, apos = splitted[0], splitted[1]
else:
acat, apos = key, None
if apos:
if acat == 'notes':
msg = "'{}' annotation type does not support positions!"
msg = msg.format(acat)
elif not acat in VALID_ANNOT_CATS and not apos in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation type!".format(key)
elif acat not in VALID_ANNOT_CATS:
msg = "'{}' is not a valid annotation category!".format(acat)
elif apos not in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation position!".format(apos)
else:
msg = "'{}' is not a valid annotation type!".format(key)
raise KeyError(msg)
@property
def header(self):
h_dict = {}
for h in HEADERS:
if self[h]: h_dict[h.split('-')[1]] = self[h]
return h_dict
@property
def footer(self):
f_dict = {}
for f in FOOTERS:
if self[f]: f_dict[f.split('-')[1]] = self[f]
return f_dict
@property
def populated(self):
"""
The annotation fields that are defined.
"""
return sorted([k for k, v in list(self.items()) if v])
@staticmethod
def _annot_key(a_type, a_pos):
if a_pos:
return '{}-{}'.format(a_type, a_pos)
else:
return a_type
def set(self, text, category='header', position='title'):
"""
Add annotation texts defined by their category and position.
Parameters
----------
text : str
The annotation text to be added.
category : {'header', 'footer', 'notes'}, default 'header'
Defines if the annotation is treated as a *header*, *footer* or
*note*.
position : {'title', 'left', 'center', 'right'}, default 'title'
Sets the placement of the annotation within its category.
Returns
-------
None
"""
if not category: category = 'header'
if not position and category != 'notes': position = 'title'
if category == 'notes': position = None
akey = self._annot_key(category, position)
self[akey].append(text)
self.__dict__[akey.replace('-', '_')].append(text)
return None
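# Annotation sketch (hypothetical texts, assuming a Chain instance ``chain``):
# texts are collected per category/position and can be inspected via the
# ``header``/``footer`` properties or ``populated``.
#
#   >>> chain.annotations.set('Base: all respondents', category='header', position='left')
#   >>> chain.annotations.set('Fieldwork: hypothetical dates', category='notes')
#   >>> chain.annotations.populated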
CELL_DETAILS = {'en-GB': {'cc': 'Cell Contents',
'N': 'Counts',
'c%': 'Column Percentages',
'r%': 'Row Percentages',
'str': 'Statistical Test Results',
'cp': 'Column Proportions',
'cm': 'Means',
'stats': 'Statistics',
'mb': 'Minimum Base',
'sb': 'Small Base',
'up': ' indicates result is significantly higher than the result in the Total column',
'down': ' indicates result is significantly lower than the result in the Total column'
},
'fr-FR': {'cc': 'Contenu cellule',
'N': 'Total',
'c%': 'Pourcentage de colonne',
'r%': 'Pourcentage de ligne',
'str': 'Résultats test statistique',
'cp': 'Proportions de colonne',
'cm': 'Moyennes de colonne',
'stats': 'Statistiques',
'mb': 'Base minimum',
'sb': 'Petite base',
'up': ' indique que le résultat est significativement supérieur au résultat de la colonne Total',
'down': ' indique que le résultat est significativement inférieur au résultat de la colonne Total'
}}
class Chain(object):
def __init__(self, stack, name, structure=None):
self.stack = stack
self.name = name
self.structure = structure
self.source = 'native'
self.edited = False
self._custom_views = None
self.double_base = False
self.grouping = None
self.sig_test_letters = None
self.totalize = False
self.base_descriptions = None
self.painted = False
self.hidden = False
self.annotations = ChainAnnotations()
self._array_style = None
self._group_style = None
self._meta = None
self._x_keys = None
self._y_keys = None
self._given_views = None
self._grp_text_map = []
self._text_map = None
self._custom_texts = {}
self._transl = qp.core.view.View._metric_name_map()
self._pad_id = None
self._frame = None
self._has_rules = None
self._flag_bases = None
self._is_mask_item = False
self._shapes = None
class _TransformedChainDF(object):
"""
"""
def __init__(self, chain):
c = chain.clone()
self.org_views = c.views
self.df = c._frame
self._org_idx = self.df.index
self._edit_idx = list(range(0, len(self._org_idx)))
self._idx_valmap = {n: o for n, o in
zip(self._edit_idx,
self._org_idx.get_level_values(1))}
self.df.index = self._edit_idx
self._org_col = self.df.columns
self._edit_col = list(range(0, len(self._org_col)))
self._col_valmap = {n: o for n, o in
zip(self._edit_col,
self._org_col.get_level_values(1))}
self.df.columns = self._edit_col
self.array_mi = c._array_style == 0
self.nested_y = c._nested_y
self._nest_mul = self._nesting_multiplier()
return None
def _nesting_multiplier(self):
"""
"""
levels = self._org_col.nlevels
if levels == 2:
return 1
else:
return (levels // 2) + 1
def _insert_viewlikes(self, new_index_flat, org_index_mapped):
inserts = [new_index_flat.index(val) for val in new_index_flat
if not val in list(org_index_mapped.values())]
flatviews = []
for name, no in list(self.org_views.items()):
e = [name] * no
flatviews.extend(e)
for vno, i in enumerate(inserts):
flatviews.insert(i, '__viewlike__{}'.format(vno))
new_views = OrderedDict()
no_of_views = Counter(flatviews)
for fv in flatviews:
if not fv in new_views: new_views[fv] = no_of_views[fv]
return new_views
def _updated_index_tuples(self, axis):
"""
"""
if axis == 1:
current = self.df.columns.values.tolist()
mapped = self._col_valmap
org_tuples = self._org_col.tolist()
else:
current = self.df.index.values.tolist()
mapped = self._idx_valmap
org_tuples = self._org_idx.tolist()
merged = [mapped[val] if val in mapped else val for val in current]
# ================================================================
if (self.array_mi and axis == 1) or axis == 0:
self._transf_views = self._insert_viewlikes(merged, mapped)
else:
self._transf_views = self.org_views
# ================================================================
i = d = 0
new_tuples = []
for merged_val in merged:
idx = i-d if i-d != len(org_tuples) else i-d-1
if org_tuples[idx][1] == merged_val:
new_tuples.append(org_tuples[idx])
else:
empties = ['*'] * self._nest_mul
new_tuple = tuple(empties + [merged_val])
new_tuples.append(new_tuple)
d += 1
i += 1
return new_tuples
def _reindex(self):
"""
"""
y_names = ['Question', 'Values']
if not self.array_mi:
x_names = y_names
else:
x_names = ['Array', 'Questions']
if self.nested_y: y_names = y_names * (self._nest_mul - 1)
tuples = self._updated_index_tuples(axis=1)
self.df.columns = pd.MultiIndex.from_tuples(tuples, names=y_names)
tuples = self._updated_index_tuples(axis=0)
self.df.index = pd.MultiIndex.from_tuples(tuples, names=x_names)
return None
def export(self):
"""
"""
return self._TransformedChainDF(self)
def assign(self, transformed_chain_df):
"""
"""
if not isinstance(transformed_chain_df, self._TransformedChainDF):
raise ValueError("Must pass an exported ``Chain`` instance!")
transformed_chain_df._reindex()
self._frame = transformed_chain_df.df
self.views = transformed_chain_df._transf_views
return None
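# Export/assign round-trip sketch: ``export`` yields an editable, flat-indexed
# copy of the Chain's dataframe; after editing, ``assign`` writes it back and
# rebuilds the multiindex and view bookkeeping.
#
#   >>> transformed = chain.export()
#   >>> transformed.df = transformed.df.drop(3, axis=0)   # e.g. drop one edited row
#   >>> chain.assign(transformed)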
def __str__(self):
if self.structure is not None:
return '%s...\n%s' % (self.__class__.__name__, str(self.structure.head()))
str_format = ('%s...'
'\nSource: %s'
'\nName: %s'
'\nOrientation: %s'
'\nX: %s'
'\nY: %s'
'\nNumber of views: %s')
return str_format % (self.__class__.__name__,
getattr(self, 'source', 'native'),
getattr(self, 'name', 'None'),
getattr(self, 'orientation', 'None'),
getattr(self, '_x_keys', 'None'),
getattr(self, '_y_keys', 'None'),
getattr(self, 'views', 'None'))
def __repr__(self):
return self.__str__()
def __len__(self):
"""Returns the total number of cells in the Chain.dataframe"""
return (len(getattr(self, 'index', [])) * len(getattr(self, 'columns', [])))
def clone(self):
"""
"""
return copy.deepcopy(self)
@lazy_property
def _default_text(self):
tk = self._meta['lib']['default text']
if tk not in self._transl:
self._transl[tk] = self._transl['en-GB']
return tk
@lazy_property
def orientation(self):
""" TODO: doc string
"""
if len(self._x_keys) == 1 and len(self._y_keys) == 1:
return 'x'
elif len(self._x_keys) == 1:
return 'x'
elif len(self._y_keys) == 1:
return 'y'
if len(self._x_keys) > 1 and len(self._y_keys) > 1:
return None
@lazy_property
def axis(self):
# TODO: name appropriate?
return int(self.orientation=='x')
@lazy_property
def axes(self):
# TODO: name appropriate?
if self.axis == 1:
return self._x_keys, self._y_keys
return self._y_keys, self._x_keys
@property
def dataframe(self):
return self._frame
@property
def index(self):
return self._index
@index.setter
def index(self, index):
self._index = index
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, columns):
self._columns = columns
@property
def frame_values(self):
return self._frame_values
@frame_values.setter
def frame_values(self, frame_values):
self._frame_values = frame_values
@property
def views(self):
return self._views
@views.setter
def views(self, views):
self._views = views
@property
def array_style(self):
return self._array_style
@property
def shapes(self):
if self._shapes is None:
self._shapes = []
return self._shapes
@array_style.setter
def array_style(self, link):
array_style = -1
for view in list(link.keys()):
if link[view].meta()['x']['is_array']:
array_style = 0
if link[view].meta()['y']['is_array']:
array_style = 1
self._array_style = array_style
@property
def pad_id(self):
if self._pad_id is None:
self._pad_id = 0
else:
self._pad_id += 1
return self._pad_id
@property
def sig_levels(self):
sigs = set([v for v in self._valid_views(True)
if v.split('|')[1].startswith('t.')])
tests = [t.split('|')[1].split('.')[1] for t in sigs]
levels = [t.split('|')[1].split('.')[3] for t in sigs]
sig_levels = {}
for m in zip(tests, levels):
l = '.{}'.format(m[1])
t = m[0]
if t in sig_levels:
sig_levels[t].append(l)
else:
sig_levels[t] = [l]
return sig_levels
@property
def cell_items(self):
if self.views:
compl_views = [v for v in self.views if ']*:' in v]
check_views = compl_views[:] or self.views.copy()
for v in check_views:
if v.startswith('__viewlike__'):
if compl_views:
check_views.remove(v)
else:
del check_views[v]
non_freqs = ('d.', 't.')
c = any(v.split('|')[3] == '' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
col_pct = any(v.split('|')[3] == 'y' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
row_pct = any(v.split('|')[3] == 'x' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
c_colpct = c and col_pct
c_rowpct = c and row_pct
c_colrow_pct = c_colpct and c_rowpct
single_ci = not (c_colrow_pct or c_colpct or c_rowpct)
if single_ci:
if c:
return 'counts'
elif col_pct:
return 'colpct'
else:
return 'rowpct'
else:
if c_colrow_pct:
return 'counts_colpct_rowpct'
elif c_colpct:
if self._counts_first():
return 'counts_colpct'
else:
return 'colpct_counts'
else:
return 'counts_rowpct'
@property
def _ci_simple(self):
ci = []
if self.views:
for v in self.views:
if 'significance' in v:
continue
if ']*:' in v:
if v.split('|')[3] == '':
if 'N' not in ci:
ci.append('N')
if v.split('|')[3] == 'y':
if 'c%' not in ci:
ci.append('c%')
if v.split('|')[3] == 'x':
if 'r%' not in ci:
ci.append('r%')
else:
if v.split('|')[-1] == 'counts':
if 'N' not in ci:
ci.append('N')
elif v.split('|')[-1] == 'c%':
if 'c%' not in ci:
ci.append('c%')
elif v.split('|')[-1] == 'r%':
if 'r%' not in ci:
ci.append('r%')
return ci
@property
def ci_count(self):
return len(self.cell_items.split('_'))
@property
def contents(self):
if self.structure is not None:
return
nested = self._array_style == 0
if nested:
dims = self._frame.shape
contents = {row: {col: {} for col in range(0, dims[1])}
for row in range(0, dims[0])}
else:
contents = dict()
for row, idx in enumerate(self._views_per_rows()):
if nested:
for i, v in list(idx.items()):
contents[row][i] = self._add_contents(v)
else:
contents[row] = self._add_contents(idx)
return contents
@property
def cell_details(self):
lang = self._default_text if self._default_text == 'fr-FR' else 'en-GB'
cd = CELL_DETAILS[lang]
ci = self.cell_items
cd_str = '%s (%s)' % (cd['cc'], ', '.join([cd[_] for _ in self._ci_simple]))
against_total = False
if self.sig_test_letters:
mapped = ''
group = None
i = 0 if (self._frame.columns.nlevels in [2, 3]) else 4
for letter, lab in zip(self.sig_test_letters, self._frame.columns.codes[-i]):
if letter == '@':
continue
if group is not None:
if lab == group:
mapped += '/' + letter
else:
group = lab
mapped += ', ' + letter
else:
group = lab
mapped += letter
test_types = cd['cp']
if self.sig_levels.get('means'):
test_types += ', ' + cd['cm']
levels = []
for key in ('props', 'means'):
for level in self.sig_levels.get(key, iter(())):
l = '%s%%' % int(100. - float(level.split('+@')[0].split('.')[1]))
if l not in levels:
levels.append(l)
if '+@' in level:
against_total = True
cd_str = cd_str[:-1] + ', ' + cd['str'] +'), '
cd_str += '%s (%s, (%s): %s' % (cd['stats'], test_types, ', '.join(levels), mapped)
if self._flag_bases:
flags = ([], [])
[(flags[0].append(min), flags[1].append(small)) for min, small in self._flag_bases]
cd_str += ', %s: %s (**), %s: %s (*)' % (cd['mb'], ', '.join(map(str, flags[0])),
cd['sb'], ', '.join(map(str, flags[1])))
cd_str += ')'
cd_str = [cd_str]
if against_total:
cd_str.extend([cd['up'], cd['down']])
return cd_str
def describe(self):
def _describe(cell_defs, row_id):
descr = []
for r, m in list(cell_defs.items()):
descr.append(
[k if isinstance(v, bool) else v for k, v in list(m.items()) if v])
if any('is_block' in d for d in descr):
blocks = self._describe_block(descr, row_id)
calc = 'calc' in blocks
for d, b in zip(descr, blocks):
if b:
d.append(b) if not calc else d.extend([b, 'has_calc'])
return descr
if self._array_style == 0:
description = {k: _describe(v, k) for k, v in list(self.contents.items())}
else:
description = _describe(self.contents, None)
return description
def _fill_cells(self):
"""
"""
self._frame = self._frame.fillna(method='ffill')
return None
# @lazy_property
def _counts_first(self):
for v in self.views:
sname = v.split('|')[-1]
if sname in ['counts', 'c%']:
if sname == 'counts':
return True
else:
return False
#@property
def _views_per_rows(self):
"""
"""
base_vk = 'x|f|x:||{}|cbase'
counts_vk = 'x|f|:||{}|counts'
pct_vk = 'x|f|:|y|{}|c%'
mean_vk = 'x|d.mean|:|y|{}|mean'
stddev_vk = 'x|d.stddev|:|y|{}|stddev'
variance_vk = 'x|d.var|:|y|{}|var'
sem_vk = 'x|d.sem|:|y|{}|sem'
if self.source == 'Crunch multitable':
ci = self._meta['display_settings']['countsOrPercents']
w = self._meta['weight']
if ci == 'counts':
main_vk = counts_vk.format(w if w else '')
else:
main_vk = pct_vk.format(w if w else '')
base_vk = base_vk.format(w if w else '')
metrics = [base_vk] + (len(self.dataframe.index)-1) * [main_vk]
elif self.source == 'Dimensions MTD':
ci = self._meta['cell_items']
w = None
axis_vals = [axv['Type'] for axv in self._meta['index-emetas']]
metrics = []
for axis_val in axis_vals:
if axis_val == 'Base':
metrics.append(base_vk.format(w if w else ''))
if axis_val == 'UnweightedBase':
metrics.append(base_vk.format(w if w else ''))
elif axis_val == 'Category':
metrics.append(counts_vk.format(w if w else ''))
elif axis_val == 'Mean':
metrics.append(mean_vk.format(w if w else ''))
elif axis_val == 'StdDev':
metrics.append(stddev_vk.format(w if w else ''))
elif axis_val == 'StdErr':
metrics.append(sem_vk.format(w if w else ''))
elif axis_val == 'SampleVar':
metrics.append(variance_vk.format(w if w else ''))
return metrics
else:
# Native Chain views
# ----------------------------------------------------------------
if self.edited and (self._custom_views and not self.array_style == 0):
return self._custom_views
else:
if self._array_style != 0:
metrics = []
if self.orientation == 'x':
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = self.views[initial]
metrics.extend(view * size)
else:
for view_part in self.views:
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = view_part[initial]
metrics.extend(view * size)
else:
counts = []
colpcts = []
rowpcts = []
metrics = []
ci = self.cell_items
for v in list(self.views.keys()):
if not v.startswith('__viewlike__'):
parts = v.split('|')
is_completed = ']*:' in v
if not self._is_c_pct(parts):
counts.extend([v]*self.views[v])
if self._is_r_pct(parts):
rowpcts.extend([v]*self.views[v])
if (self._is_c_pct(parts) or self._is_base(parts) or
self._is_stat(parts)):
colpcts.extend([v]*self.views[v])
else:
counts = counts + ['__viewlike__']
colpcts = colpcts + ['__viewlike__']
rowpcts = rowpcts + ['__viewlike__']
dims = self._frame.shape
for row in range(0, dims[0]):
if ci in ['counts_colpct', 'colpct_counts'] and self.grouping:
if row % 2 == 0:
if self._counts_first():
vc = counts
else:
vc = colpcts
else:
if not self._counts_first():
vc = counts
else:
vc = colpcts
else:
vc = counts if ci == 'counts' else colpcts
metrics.append({col: vc[col] for col in range(0, dims[1])})
return metrics
def _valid_views(self, flat=False):
clean_view_list = []
valid = list(self.views.keys())
org_vc = self._given_views
v_likes = [v for v in valid if v.startswith('__viewlike__')]
if isinstance(org_vc, tuple):
v_likes = tuple(v_likes)
view_coll = org_vc + v_likes
for v in view_coll:
if isinstance(v, str):
if v in valid:
clean_view_list.append(v)
else:
new_v = []
for sub_v in v:
if sub_v in valid:
new_v.append(sub_v)
if isinstance(v, tuple):
new_v = list(new_v)
if new_v:
if len(new_v) == 1: new_v = new_v[0]
if not flat:
clean_view_list.append(new_v)
else:
if isinstance(new_v, list):
clean_view_list.extend(new_v)
else:
clean_view_list.append(new_v)
return clean_view_list
def _add_contents(self, viewelement):
"""
"""
if viewelement.startswith('__viewlike__'):
parts = '|||||'
viewlike = True
else:
parts = viewelement.split('|')
viewlike = False
return dict(is_default=self._is_default(parts),
is_c_base=self._is_c_base(parts),
is_r_base=self._is_r_base(parts),
is_e_base=self._is_e_base(parts),
is_c_base_gross=self._is_c_base_gross(parts),
is_counts=self._is_counts(parts),
is_c_pct=self._is_c_pct(parts),
is_r_pct=self._is_r_pct(parts),
is_res_c_pct=self._is_res_c_pct(parts),
is_counts_sum=self._is_counts_sum(parts),
is_c_pct_sum=self._is_c_pct_sum(parts),
is_counts_cumsum=self._is_counts_cumsum(parts),
is_c_pct_cumsum=self._is_c_pct_cumsum(parts),
is_net=self._is_net(parts),
is_block=self._is_block(parts),
is_calc_only = self._is_calc_only(parts),
is_mean=self._is_mean(parts),
is_stddev=self._is_stddev(parts),
is_min=self._is_min(parts),
is_max=self._is_max(parts),
is_median=self._is_median(parts),
is_variance=self._is_variance(parts),
is_sem=self._is_sem(parts),
is_varcoeff=self._is_varcoeff(parts),
is_percentile=self._is_percentile(parts),
is_propstest=self._is_propstest(parts),
is_meanstest=self._is_meanstest(parts),
is_weighted=self._is_weighted(parts),
weight=self._weight(parts),
is_stat=self._is_stat(parts),
stat=self._stat(parts),
siglevel=self._siglevel(parts),
is_viewlike=viewlike)
def _row_pattern(self, target_ci):
"""
"""
cisplit = self.cell_items.split('_')
if target_ci == 'c%':
start = cisplit.index('colpct')
elif target_ci == 'counts':
start = cisplit.index('counts')
repeat = self.ci_count
return (start, repeat)
def _view_idxs(self, view_tags, keep_tests=True, keep_bases=True, names=False, ci=None):
"""
"""
if not isinstance(view_tags, list): view_tags = [view_tags]
rowmeta = self.named_rowmeta
nested = self.array_style == 0
if nested:
if self.ci_count > 1:
rp_idx = self._row_pattern(ci)[0]
rowmeta = rowmeta[rp_idx]
else:
rp_idx = 0
rowmeta = rowmeta[0]
rows = []
for r in rowmeta:
is_code = str(r[0]).isdigit()
if 'is_counts' in r[1] and is_code:
rows.append(('counts', r[1]))
elif 'is_c_pct' in r[1] and is_code:
rows.append(('c%', r[1]))
elif 'is_propstest' in r[1]:
rows.append((r[0], r[1]))
elif 'is_meanstest' in r[1]:
rows.append((r[0], r[1]))
else:
rows.append(r)
invalids = []
if not keep_tests:
invalids.extend(['is_propstest', 'is_meanstest'])
if ci == 'counts':
invalids.append('is_c_pct')
elif ci == 'c%':
invalids.append('is_counts')
idxs = []
names = []
order = []
for i, row in enumerate(rows):
if any([invalid in row[1] for invalid in invalids]):
if not (row[0] == 'All' and keep_bases): continue
if row[0] in view_tags:
order.append(view_tags.index(row[0]))
idxs.append(i)
if nested:
names.append(self._views_per_rows()[rp_idx][i])
else:
names.append(self._views_per_rows()[i])
return (idxs, order) if not names else (idxs, names, order)
@staticmethod
def _remove_grouped_blanks(viewindex_labs):
"""
"""
full = []
for v in viewindex_labs:
if v == '':
full.append(last)
else:
last = v
full.append(last)
return full
def _slice_edited_index(self, axis, positions):
"""
"""
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = [l_one[p] for p in positions]
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
return pd.MultiIndex.from_tuples(axis_tuples, names=names)
def _non_grouped_axis(self):
"""
"""
axis = self._frame.index
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = self._remove_grouped_blanks(l_one)
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
self._frame.index = pd.MultiIndex.from_tuples(axis_tuples, names=names)
return None
@property
def named_rowmeta(self):
if self.painted:
self.toggle_labels()
d = self.describe()
if self.array_style == 0:
n = self._frame.columns.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = {rowid: list(zip(n, rowmeta)) for rowid, rowmeta in list(d.items())}
else:
n = self._frame.index.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = list(zip(n, d))
if not self.painted: self.toggle_labels()
return mapped
@lazy_property
def _nested_y(self):
return any('>' in v for v in self._y_keys)
def _is_default(self, parts):
return parts[-1] == 'default'
def _is_c_base(self, parts):
return parts[-1] == 'cbase'
def _is_r_base(self, parts):
return parts[-1] == 'rbase'
def _is_e_base(self, parts):
return parts[-1] == 'ebase'
def _is_c_base_gross(self, parts):
return parts[-1] == 'cbase_gross'
def _is_base(self, parts):
return (self._is_c_base(parts) or
self._is_c_base_gross(parts) or
self._is_e_base(parts) or
self._is_r_base(parts))
def _is_counts(self, parts):
return parts[1].startswith('f') and parts[3] == ''
def _is_c_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'y'
def _is_r_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'x'
def _is_res_c_pct(self, parts):
return parts[-1] == 'res_c%'
def _is_net(self, parts):
return parts[1].startswith(('f', 'f.c:f', 't.props')) and \
len(parts[2]) > 3 and not parts[2] == 'x++'
def _is_calc_only(self, parts):
if self._is_net(parts) and not self._is_block(parts):
return ((self.__has_freq_calc(parts) or
self.__is_calc_only_propstest(parts)) and not
(self._is_counts_sum(parts) or self._is_c_pct_sum(parts)))
else:
return False
def _is_block(self, parts):
if self._is_net(parts):
conditions = parts[2].split('[')
multiple_conditions = len(conditions) > 2
expand = '+{' in parts[2] or '}+' in parts[2]
complete = '*:' in parts[2]
if expand or complete:
return True
if multiple_conditions:
if self.__has_operator_expr(parts):
return True
return False
return False
return False
def _stat(self, parts):
if parts[1].startswith('d.'):
return parts[1].split('.')[-1]
else:
return None
# non-meta relevant helpers
def __has_operator_expr(self, parts):
e = parts[2]
for syntax in [']*:', '[+{', '}+']:
if syntax in e: e = e.replace(syntax, '')
ops = ['+', '-', '*', '/']
return any(len(e.split(op)) > 1 for op in ops)
def __has_freq_calc(self, parts):
return parts[1].startswith('f.c:f')
def __is_calc_only_propstest(self, parts):
return self._is_propstest(parts) and self.__has_operator_expr(parts)
@staticmethod
def _statname(parts):
split = parts[1].split('.')
if len(split) > 1:
return split[1]
return split[-1]
def _is_mean(self, parts):
return self._statname(parts) == 'mean'
def _is_stddev(self, parts):
return self._statname(parts) == 'stddev'
def _is_min(self, parts):
return self._statname(parts) == 'min'
def _is_max(self, parts):
return self._statname(parts) == 'max'
def _is_median(self, parts):
return self._statname(parts) == 'median'
def _is_variance(self, parts):
return self._statname(parts) == 'var'
def _is_sem(self, parts):
return self._statname(parts) == 'sem'
def _is_varcoeff(self, parts):
return self._statname(parts) == 'varcoeff'
def _is_percentile(self, parts):
return self._statname(parts) in ['upper_q', 'lower_q', 'median']
def _is_counts_sum(self, parts):
return parts[-1].endswith('counts_sum')
def _is_c_pct_sum(self, parts):
return parts[-1].endswith('c%_sum')
def _is_counts_cumsum(self, parts):
return parts[-1].endswith('counts_cumsum')
def _is_c_pct_cumsum(self, parts):
return parts[-1].endswith('c%_cumsum')
def _is_weighted(self, parts):
return parts[4] != ''
def _weight(self, parts):
if parts[4] != '':
return parts[4]
else:
return None
def _is_stat(self, parts):
return parts[1].startswith('d.')
def _is_propstest(self, parts):
return parts[1].startswith('t.props')
def _is_meanstest(self, parts):
return parts[1].startswith('t.means')
def _siglevel(self, parts):
if self._is_meanstest(parts) or self._is_propstest(parts):
return parts[1].split('.')[-1]
else:
return None
def _describe_block(self, description, row_id):
if self.painted:
repaint = True
self.toggle_labels()
else:
repaint = False
vpr = self._views_per_rows()
if row_id is not None:
vpr = [v[1] for v in list(vpr[row_id].items())]
idx = self.dataframe.columns.get_level_values(1).tolist()
else:
idx = self.dataframe.index.get_level_values(1).tolist()
idx_view_map = list(zip(idx, vpr))
block_net_vk = [v for v in vpr if len(v.split('|')[2].split('['))>2 or
'[+{' in v.split('|')[2] or '}+]' in v.split('|')[2]]
has_calc = any([v.split('|')[1].startswith('f.c') for v in block_net_vk])
is_tested = any(v.split('|')[1].startswith('t.props') for v in vpr)
if block_net_vk:
expr = block_net_vk[0].split('|')[2]
expanded_codes = set(map(int, re.findall(r'\d+', expr)))
else:
expanded_codes = []
for idx, m in enumerate(idx_view_map):
if idx_view_map[idx][0] == '':
idx_view_map[idx] = (idx_view_map[idx-1][0], idx_view_map[idx][1])
for idx, row in enumerate(description):
if not 'is_block' in row:
idx_view_map[idx] = None
blocks_len = len(expr.split('],')) * (self.ci_count + is_tested)
if has_calc: blocks_len -= (self.ci_count + is_tested)
block_net_def = []
described_nets = 0
for e in idx_view_map:
if e:
if isinstance(e[0], str):
if has_calc and described_nets == blocks_len:
block_net_def.append('calc')
else:
block_net_def.append('net')
described_nets += 1
else:
code = int(e[0])
if code in expanded_codes:
block_net_def.append('expanded')
else:
block_net_def.append('normal')
else:
block_net_def.append(e)
if repaint: self.toggle_labels()
return block_net_def
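    # Build the Chain: fetch each Link from the Stack, concatenate its view
    # DataFrames (applying rules and padding), stack the per-x frames along
    # the orientation axis and store the resulting views/index metadata.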
def get(self, data_key, filter_key, x_keys, y_keys, views, rules=False,
rules_weight=None, orient='x', prioritize=True):
""" Get the concatenated Chain.DataFrame
"""
self._meta = self.stack[data_key].meta
self._given_views = views
self._x_keys = x_keys
self._y_keys = y_keys
concat_axis = 0
if rules:
if not isinstance(rules, list):
self._has_rules = ['x', 'y']
else:
self._has_rules = rules
# use_views = views[:]
# for first in self.axes[0]:
# for second in self.axes[1]:
# link = self._get_link(data_key, filter_key, first, second)
# for v in use_views:
# if v not in link:
# use_views.remove(v)
for first in self.axes[0]:
found = []
x_frames = []
for second in self.axes[1]:
if self.axis == 1:
link = self._get_link(data_key, filter_key, first, second)
else:
link = self._get_link(data_key, filter_key, second, first)
if link is None:
continue
if prioritize: link = self._drop_substituted_views(link)
found_views, y_frames = self._concat_views(
link, views, rules_weight)
found.append(found_views)
try:
if self._meta['columns'][link.x].get('parent'):
self._is_mask_item = True
except KeyError:
pass
                # TODO: contains array summ. attr.
# TODO: make this work y_frames = self._pad_frames(y_frames)
self.array_style = link
if self.array_style > -1:
concat_axis = 1 if self.array_style == 0 else 0
y_frames = self._pad_frames(y_frames)
x_frames.append(pd.concat(y_frames, axis=concat_axis))
self.shapes.append(x_frames[-1].shape)
self._frame = pd.concat(self._pad(x_frames), axis=self.axis)
        if self._group_style == 'reduced' and self.array_style > -1:
scan_views = [v if isinstance(v, (tuple, list)) else [v]
for v in self._given_views]
scan_views = [v for v in scan_views if len(v) > 1]
no_tests = []
for scan_view in scan_views:
new_views = []
for view in scan_view:
if not view.split('|')[1].startswith('t.'):
new_views.append(view)
no_tests.append(new_views)
cond = any(len(v) >= 2 for v in no_tests)
if cond:
self._frame = self._reduce_grouped_index(self._frame, 2, self._array_style)
if self.axis == 1:
self.views = found[-1]
else:
self.views = found
self.double_base = len([v for v in self.views
if v.split('|')[-1] == 'cbase']) > 1
self._index = self._frame.index
self._columns = self._frame.columns
self._extract_base_descriptions()
del self.stack
return self
def _toggle_bases(self, keep_weighted=True):
df = self._frame
is_array = self._array_style == 0
contents = self.contents[0] if is_array else self.contents
has_wgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and v['is_weighted']]
has_unwgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and not v['is_weighted']]
if not (has_wgt_b and has_unwgt_b):
return None
if keep_weighted:
drop_rows = has_unwgt_b
names = ['x|f|x:|||cbase']
else:
drop_rows = has_wgt_b
names = ['x|f|x:||{}|cbase'.format(list(contents.values())[0]['weight'])]
for v in self.views.copy():
if v in names:
del self._views[v]
df = self._frame
if is_array:
cols = [col for x, col in enumerate(df.columns.tolist())
if not x in drop_rows]
df = df.loc[:, cols]
else:
rows = [row for x, row in enumerate(df.index.tolist())
if not x in drop_rows]
df = df.loc[rows, :]
self._frame = df
self._index = df.index
self._columns = df.columns
return None
def _slice_edited_index(self, axis, positions):
"""
"""
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = [l_one[p] for p in positions]
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
return pd.MultiIndex.from_tuples(axis_tuples, names=names)
def _drop_substituted_views(self, link):
if any(isinstance(sect, (list, tuple)) for sect in self._given_views):
chain_views = list(chain.from_iterable(self._given_views))
else:
chain_views = self._given_views
has_compl = any(']*:' in vk for vk in link)
req_compl = any(']*:' in vk for vk in chain_views)
has_cumsum = any('++' in vk for vk in link)
req_cumsum = any('++' in vk for vk in chain_views)
if (has_compl and req_compl) or (has_cumsum and req_cumsum):
new_link = copy.copy(link)
views = []
for vk in link:
vksplit = vk.split('|')
method, cond, name = vksplit[1], vksplit[2], vksplit[-1]
full_frame = name in ['counts', 'c%']
basic_sigtest = method.startswith('t.') and cond == ':'
if not full_frame and not basic_sigtest: views.append(vk)
for vk in link:
if vk not in views: del new_link[vk]
return new_link
else:
return link
def _pad_frames(self, frames):
""" TODO: doc string
"""
empty_frame = lambda f: pd.DataFrame(index=f.index, columns=f.columns)
max_lab = max(f.axes[self.array_style].size for f in frames)
for e, f in enumerate(frames):
size = f.axes[self.array_style].size
if size < max_lab:
f = pd.concat([f, empty_frame(f)], axis=self.array_style)
order = [None] * (size * 2)
order[::2] = list(range(size))
order[1::2] = list(range(size, size * 2))
if self.array_style == 0:
frames[e] = f.iloc[order, :]
else:
frames[e] = f.iloc[:, order]
return frames
def _get_link(self, data_key, filter_key, x_key, y_key):
"""
"""
base = self.stack[data_key][filter_key]
if x_key in base:
base = base[x_key]
if y_key in base:
return base[y_key]
else:
if self._array_style == -1:
self._y_keys.remove(y_key)
else:
self._x_keys.remove(x_key)
return None
def _index_switch(self, axis):
""" Returns self.dataframe/frame index/ columns based on given x/ y
"""
return dict(x=self._frame.index, y=self._frame.columns).get(axis)
def _pad(self, frames):
""" Pad index/ columns when nlevels is less than the max nlevels
in list of dataframes.
"""
indexes = []
max_nlevels = [max(f.axes[i].nlevels for f in frames) for i in (0, 1)]
for e, f in enumerate(frames):
indexes = []
for i in (0, 1):
if f.axes[i].nlevels < max_nlevels[i]:
indexes.append(self._pad_index(f.axes[i], max_nlevels[i]))
else:
indexes.append(f.axes[i])
frames[e].index, frames[e].columns = indexes
return frames
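    # Pad a shallower MultiIndex with '#pad-<id>' levels so its nlevels match
    # the deepest axis among the frames being concatenated.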
def _pad_index(self, index, size):
""" Add levels to columns MultiIndex so the nlevels matches
the biggest columns MultiIndex in DataFrames to be concatenated.
"""
pid = self.pad_id
pad = ((size - index.nlevels) // 2)
fill = int((pad % 2) == 1)
names = list(index.names)
names[0:0] = names[:2] * pad
arrays = self._lzip(index.values)
arrays[0:0] = [tuple('#pad-%s' % pid for _ in arrays[i])
for i in range(pad + fill)] * pad
return pd.MultiIndex.from_arrays(arrays, names=names)
@staticmethod
def _reindx_source(df, varname, total):
"""
"""
df.index = df.index.set_levels([varname], level=0, inplace=False)
if df.columns.get_level_values(0).tolist()[0] != varname and total:
df.columns = df.columns.set_levels([varname], level=0, inplace=False)
return df
def _concat_views(self, link, views, rules_weight, found=None):
""" Concatenates the Views of a Chain.
"""
frames = []
totals = [[_TOTAL]] * 2
if found is None:
found = OrderedDict()
if self._text_map is None:
self._text_map = dict()
for view in views:
try:
self.array_style = link
if isinstance(view, (list, tuple)):
if not self.grouping:
self.grouping = True
if isinstance(view, tuple):
self._group_style = 'reduced'
else:
self._group_style = 'normal'
if self.array_style > -1:
use_grp_type = 'normal'
else:
use_grp_type = self._group_style
found, grouped = self._concat_views(link, view, rules_weight, found=found)
if grouped:
frames.append(self._group_views(grouped, use_grp_type))
else:
agg = link[view].meta()['agg']
is_descriptive = agg['method'] == 'descriptives'
is_base = agg['name'] in ['cbase', 'rbase', 'ebase', 'cbase_gross']
is_sum = agg['name'] in ['counts_sum', 'c%_sum']
is_net = link[view].is_net()
oth_src = link[view].has_other_source()
no_total_sign = is_descriptive or is_base or is_sum or is_net
if link[view]._custom_txt and is_descriptive:
statname = agg['fullname'].split('|')[1].split('.')[1]
if not statname in self._custom_texts:
self._custom_texts[statname] = []
self._custom_texts[statname].append(link[view]._custom_txt)
if is_descriptive:
text = agg['name']
try:
self._text_map.update({agg['name']: text})
except AttributeError:
self._text_map = {agg['name']: text}
if agg['text']:
name = dict(cbase='All').get(agg['name'], agg['name'])
try:
self._text_map.update({name: agg['text']})
except AttributeError:
self._text_map = {name: agg['text'],
_TOTAL: 'Total'}
if agg['grp_text_map']:
# try:
if not agg['grp_text_map'] in self._grp_text_map:
self._grp_text_map.append(agg['grp_text_map'])
# except AttributeError:
# self._grp_text_map = [agg['grp_text_map']]
frame = link[view].dataframe
if oth_src:
frame = self._reindx_source(frame, link.x, link.y == _TOTAL)
# RULES SECTION
# ========================================================
# TODO: DYNAMIC RULES:
# - all_rules_axes, rules_weight must be provided not hardcoded
# - Review copy/pickle in original version!!!
rules_weight = None
if self._has_rules:
rules = Rules(link, view, self._has_rules, rules_weight)
# print rules.show_rules()
# rules.get_slicer()
# print rules.show_slicers()
rules.apply()
frame = rules.rules_df()
# ========================================================
if not no_total_sign and (link.x == _TOTAL or link.y == _TOTAL):
if link.x == _TOTAL:
level_names = [[link.y], ['@']]
elif link.y == _TOTAL:
level_names = [[link.x], ['@']]
try:
frame.columns.set_levels(level_names, level=[0, 1],
inplace=True)
except ValueError:
pass
frames.append(frame)
if view not in found:
if self._array_style != 0:
found[view] = len(frame.index)
else:
found[view] = len(frame.columns)
if link[view]._kwargs.get('flag_bases'):
flag_bases = link[view]._kwargs['flag_bases']
try:
if flag_bases not in self._flag_bases:
self._flag_bases.append(flag_bases)
except TypeError:
self._flag_bases = [flag_bases]
except KeyError:
pass
return found, frames
@staticmethod
def _temp_nest_index(df):
"""
Flatten the nested MultiIndex for easier handling.
"""
# Build flat column labels
flat_cols = []
order_idx = []
i = -1
for col in df.columns.values:
flat_col_lab = ''.join(str(col[:-1])).strip()
if not flat_col_lab in flat_cols:
i += 1
order_idx.append(i)
flat_cols.append(flat_col_lab)
else:
order_idx.append(i)
# Drop unwanted levels (keep last Values Index-level in that process)
levels = list(range(0, df.columns.nlevels-1))
        drop_levels = levels[:-2] + [levels[-1]]
df.columns = df.columns.droplevel(drop_levels)
# Apply the new flat labels and resort the columns
df.columns.set_levels(levels=flat_cols, level=0, inplace=True)
df.columns.set_codes(order_idx, level=0, inplace=True)
return df, flat_cols
@staticmethod
def _replace_test_results(df, replacement_map, char_repr):
"""
Swap all digit-based results with letters referencing the column header.
.. note:: The modified df will be stripped of all indexing on both rows
and columns.
"""
all_dfs = []
ignore = False
for col in list(replacement_map.keys()):
target_col = df.columns[0] if col == '@' else col
value_df = df[[target_col]].copy()
if not col == '@':
value_df.drop('@', axis=1, level=1, inplace=True)
values = value_df.replace(np.NaN, '-').values.tolist()
r = replacement_map[col]
new_values = []
case = None
for v in values:
if isinstance(v[0], str):
if char_repr == 'upper':
case = 'up'
elif char_repr == 'lower':
case = 'low'
elif char_repr == 'alternate':
if case == 'up':
case = 'low'
else:
case = 'up'
for no, l in sorted(list(r.items()), reverse=True):
v = [char.replace(str(no), l if case == 'up' else l.lower())
if isinstance(char, str)
else char for char in v]
new_values.append(v)
else:
new_values.append(v)
part_df = pd.DataFrame(new_values)
all_dfs.append(part_df)
letter_df = pd.concat(all_dfs, axis=1)
# Clean it up
letter_df.replace('-', np.NaN, inplace=True)
for signs in [('[', ''), (']', ''), (', ', '.')]:
letter_df = letter_df.applymap(lambda x: x.replace(signs[0], signs[1])
if isinstance(x, str) else x)
return letter_df
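    # Build the test letter sequence A..Z, AA, AB, ... for the y-axis, with
    # '@' reserved for the total column when one is present.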
@staticmethod
def _get_abc_letters(no_of_cols, incl_total):
"""
Get the list of letter replacements depending on the y-axis length.
"""
repeat_alphabet = int(no_of_cols / 26)
abc = list(string.ascii_uppercase)
letters = list(string.ascii_uppercase)
if repeat_alphabet:
for r in range(0, repeat_alphabet):
letter = abc[r]
extend_abc = ['{}{}'.format(letter, l) for l in abc]
letters.extend(extend_abc)
if incl_total:
letters = ['@'] + letters[:no_of_cols-1]
else:
letters = letters[:no_of_cols]
return letters
def _any_tests(self):
vms = [v.split('|')[1] for v in list(self._views.keys())]
return any('t.' in v for v in vms)
def _no_of_tests(self):
tests = [v for v in list(self._views.keys())
if v.split('|')[1].startswith('t.')]
levels = [v.split('|')[1].split('.')[-1] for v in tests]
return len(set(levels))
def _siglevel_on_row(self):
"""
"""
vpr = self._views_per_rows()
tests = [(no, v) for no, v in enumerate(vpr)
if v.split('|')[1].startswith('t.')]
s = [(t[0],
float(int(t[1].split('|')[1].split('.')[3].split('+')[0]))/100.0)
for t in tests]
return s
def transform_tests(self, char_repr='upper', display_level=True):
"""
Transform column-wise digit-based test representation to letters.
        Adds a new header row that assigns letters (A, B, C, ...) to all
        columns and maps any significance test's result cells to these
        column indicators.
"""
if not self._any_tests(): return None
# Preparation of input dataframe and dimensions of y-axis header
df = self.dataframe.copy()
number_codes = df.columns.get_level_values(-1).tolist()
number_header_row = copy.copy(df.columns)
if self._no_of_tests() != 2 and char_repr == 'alternate':
char_repr = 'upper'
has_total = '@' in self._y_keys
if self._nested_y:
df, questions = self._temp_nest_index(df)
else:
questions = self._y_keys
all_num = number_codes if not has_total else [0] + number_codes[1:]
# Set the new column header (ABC, ...)
column_letters = self._get_abc_letters(len(number_codes), has_total)
vals = df.columns.get_level_values(0).tolist()
mi = pd.MultiIndex.from_arrays(
(vals,
column_letters))
df.columns = mi
self.sig_test_letters = df.columns.get_level_values(1).tolist()
# Build the replacements dict and build list of unique column indices
test_dict = OrderedDict()
for num_idx, col in enumerate(df.columns):
if col[1] == '@':
question = col[1]
else:
question = col[0]
if not question in test_dict: test_dict[question] = {}
number = all_num[num_idx]
letter = col[1]
test_dict[question][number] = letter
letter_df = self._replace_test_results(df, test_dict, char_repr)
# Re-apply indexing & finalize the new crossbreak column header
if display_level:
levels = self._siglevel_on_row()
index = df.index.get_level_values(1).tolist()
for i, l in levels:
index[i] = '#Level: {}'.format(l)
l0 = df.index.get_level_values(0).tolist()[0]
tuples = [(l0, i) for i in index]
index = pd.MultiIndex.from_tuples(
tuples, names=['Question', 'Values'])
letter_df.index = index
else:
letter_df.index = df.index
letter_df.columns = number_header_row
letter_df = self._apply_letter_header(letter_df)
self._frame = letter_df
return self
def _remove_letter_header(self):
self._frame.columns = self._frame.columns.droplevel(level=-1)
return None
def _apply_letter_header(self, df):
"""
"""
new_tuples = []
org_names = [n for n in df.columns.names]
idx = df.columns
for i, l in zip(idx, self.sig_test_letters):
new_tuples.append(i + (l, ))
if not 'Test-IDs' in org_names:
org_names.append('Test-IDs')
mi = pd.MultiIndex.from_tuples(new_tuples, names=org_names)
df.columns = mi
return df
def _extract_base_descriptions(self):
"""
"""
if self.source == 'Crunch multitable':
self.base_descriptions = self._meta['var_meta'].get('notes', None)
else:
base_texts = OrderedDict()
arr_style = self.array_style
if arr_style != -1:
var = self._x_keys[0] if arr_style == 0 else self._y_keys[0]
masks = self._meta['masks']
columns = self._meta['columns']
item = masks[var]['items'][0]['source'].split('@')[-1]
test_item = columns[item]
test_mask = masks[var]
if 'properties' in test_mask:
base_text = test_mask['properties'].get('base_text', None)
elif 'properties' in test_item:
base_text = test_item['properties'].get('base_text', None)
else:
base_text = None
self.base_descriptions = base_text
else:
for x in self._x_keys:
if 'properties' in self._meta['columns'][x]:
bt = self._meta['columns'][x]['properties'].get('base_text', None)
if bt:
base_texts[x] = bt
if base_texts:
if self.orientation == 'x':
self.base_descriptions = list(base_texts.values())[0]
else:
self.base_descriptions = list(base_texts.values())
return None
def _ensure_indexes(self):
if self.painted:
self._frame.index, self._frame.columns = self.index, self.columns
if self.structure is not None:
self._frame.loc[:, :] = self.frame_values
else:
self.index, self.columns = self._frame.index, self._frame.columns
if self.structure is not None:
self.frame_values = self._frame.values
def _finish_text_key(self, text_key, text_loc_x, text_loc_y):
text_keys = dict()
text_key = text_key or self._default_text
if text_loc_x:
text_keys['x'] = (text_loc_x, text_key)
else:
text_keys['x'] = text_key
if text_loc_y:
text_keys['y'] = (text_loc_y, text_key)
else:
text_keys['y'] = text_key
return text_keys
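    # Illustrative usage (a sketch, not taken from the library docs; assumes
    # `chain` is an already populated Chain with sig-tests aggregated):
    #
    #     chain.paint(text_key='en-GB', transform_tests='upper',
    #                 add_test_ids=True, totalize=True)
    #     chain.dataframe   # labelled frame, incl. a 'Test-IDs' column level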
def paint(self, text_key=None, text_loc_x=None, text_loc_y=None, display=None,
axes=None, view_level=False, transform_tests='upper', display_level=True,
add_test_ids=True, add_base_texts='simple', totalize=False,
sep=None, na_rep=None, transform_column_names=None,
exclude_mask_text=False):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
        Use this to prepare a ``Chain`` for further usage in an Excel or
        PowerPoint Build.
Parameters
----------
        text_key : str, default None
            The meta text key used to resolve question and value labels;
            falls back to the Chain's default text key if not provided.
        text_loc_x : str, default None
            The key in the 'text' to locate the text_key for the x-axis
        text_loc_y : str, default None
            The key in the 'text' to locate the text_key for the y-axis
        display : {'x', 'y', ['x', 'y']}, default None
            The axes on which labels get prefixed with their variable name
            ('<name>. <label>'). If None, both axes are used.
        axes : {'x', 'y', ['x', 'y']}, default None
            The axes that get painted at all. If None, both axes are used.
        view_level : bool, default False
            If True, append a third index level holding each row's view key
            to the painted DataFrame.
        transform_tests : {False, 'upper', 'lower', 'alternate'}, default 'upper'
            Letter case used when converting column-wise significance test
            results into column letters; False skips the conversion.
        display_level : bool, default True
            Whether to show each test's significance level in the row index
            (as '#Level: ...').
        add_test_ids : bool, default True
            Whether to re-attach the letter header row ('Test-IDs' column
            level) after painting.
        add_base_texts : {False, 'all', 'simple', 'simple-no-items'}, default 'simple'
            Whether or not to include existing ``.base_descriptions`` str
            to the label of the appropriate base view. Selecting ``'simple'``
            will inject the base texts to non-array type Chains only.
        totalize : bool, default False
            If True, label the '@' total column 'Total' instead of using the
            question text.
        sep : str, default None
            The separator used for painting ``pandas.DataFrame`` columns
na_rep : str, default None
numpy.NaN will be replaced with na_rep if passed
transform_column_names : dict, default None
Transformed column_names are added to the labeltexts.
exclude_mask_text : bool, default False
Exclude mask text from mask-item texts.
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
self._ensure_indexes()
text_keys = self._finish_text_key(text_key, text_loc_x, text_loc_y)
if self.structure is not None:
self._paint_structure(text_key, sep=sep, na_rep=na_rep)
else:
self.totalize = totalize
if transform_tests: self.transform_tests(transform_tests, display_level)
# Remove any letter header row from transformed tests...
if self.sig_test_letters:
self._remove_letter_header()
if display is None:
display = _AXES
if axes is None:
axes = _AXES
self._paint(text_keys, display, axes, add_base_texts,
transform_column_names, exclude_mask_text)
# Re-build the full column index (labels + letter row)
if self.sig_test_letters and add_test_ids:
self._frame = self._apply_letter_header(self._frame)
if view_level:
self._add_view_level()
self.painted = True
return None
def _paint_structure(self, text_key=None, sep=None, na_rep=None):
""" Paint the dataframe-type Chain.
"""
if not text_key:
text_key = self._meta['lib']['default text']
str_format = '%%s%s%%s' % sep
column_mapper = dict()
na_rep = na_rep or ''
pattern = r'\, (?=\W|$)'
for column in self.structure.columns:
if not column in self._meta['columns']: continue
meta = self._meta['columns'][column]
if sep:
column_mapper[column] = str_format % (column, meta['text'][text_key])
else:
column_mapper[column] = meta['text'][text_key]
if meta.get('values'):
values = meta['values']
if isinstance(values, str):
pointers = values.split('@')
values = self._meta[pointers.pop(0)]
while pointers:
values = values[pointers.pop(0)]
if meta['type'] == 'delimited set':
value_mapper = {
str(item['value']): item['text'][text_key]
for item in values
}
series = self.structure[column]
try:
series = (series.str.split(';')
.apply(pd.Series, 1)
.stack(dropna=False)
.map(value_mapper.get) #, na_action='ignore')
.unstack())
first = series[series.columns[0]]
rest = [series[c] for c in series.columns[1:]]
self.structure[column] = (
first
.str.cat(rest, sep=', ', na_rep='')
.str.slice(0, -2)
.replace(to_replace=pattern, value='', regex=True)
.replace(to_replace='', value=na_rep)
)
except AttributeError:
continue
else:
value_mapper = {
item['value']: item['text'][text_key]
for item in values
}
self.structure[column] = (self.structure[column]
.map(value_mapper.get,
na_action='ignore')
)
self.structure[column].fillna(na_rep, inplace=True)
self.structure.rename(columns=column_mapper, inplace=True)
def _paint(self, text_keys, display, axes, bases, transform_column_names,
exclude_mask_text):
""" Paint the Chain.dataframe
"""
indexes = []
for axis in _AXES:
index = self._index_switch(axis)
if axis in axes:
index = self._paint_index(index, text_keys, display, axis,
bases, transform_column_names,
exclude_mask_text)
indexes.append(index)
self._frame.index, self._frame.columns = indexes
def _paint_index(self, index, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text):
""" Paint the Chain.dataframe.index1 """
error = "No text keys from {} found in {}"
level_0_text, level_1_text = [], []
nlevels = index.nlevels
if nlevels > 2:
arrays = []
for i in range(0, nlevels, 2):
index_0 = index.get_level_values(i)
index_1 = index.get_level_values(i+1)
tuples = list(zip(index_0.values, index_1.values))
names = (index_0.name, index_1.name)
sub = pd.MultiIndex.from_tuples(tuples, names=names)
sub = self._paint_index(sub, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text)
arrays.extend(self._lzip(sub.ravel()))
tuples = self._lzip(arrays)
return pd.MultiIndex.from_tuples(tuples, names=index.names)
levels = self._lzip(index.values)
arrays = (self._get_level_0(levels[0], text_keys, display, axis,
transform_column_names, exclude_mask_text),
self._get_level_1(levels, text_keys, display, axis, bases))
new_index = pd.MultiIndex.from_arrays(arrays, names=index.names)
return new_index
def _get_level_0(self, level, text_keys, display, axis,
transform_column_names, exclude_mask_text):
"""
"""
level_0_text = []
for value in level:
if str(value).startswith('#pad'):
pass
elif pd.notnull(value):
if value in list(self._text_map.keys()):
value = self._text_map[value]
else:
text = self._get_text(value, text_keys[axis], exclude_mask_text)
if axis in display:
if transform_column_names:
value = transform_column_names.get(value, value)
value = '{}. {}'.format(value, text)
else:
value = text
level_0_text.append(value)
if '@' in self._y_keys and self.totalize and axis == 'y':
level_0_text = ['Total'] + level_0_text[1:]
return list(map(str, level_0_text))
def _get_level_1(self, levels, text_keys, display, axis, bases):
"""
"""
level_1_text = []
if text_keys[axis] in self._transl:
tk_transl = text_keys[axis]
else:
tk_transl = self._default_text
c_text = copy.deepcopy(self._custom_texts) if self._custom_texts else {}
for i, value in enumerate(levels[1]):
if str(value).startswith('#pad'):
level_1_text.append(value)
elif pd.isnull(value):
level_1_text.append(value)
elif str(value) == '':
level_1_text.append(value)
elif str(value).startswith('#Level: '):
level_1_text.append(value.replace('#Level: ', ''))
else:
translate = list(self._transl[list(self._transl.keys())[0]].keys())
if value in list(self._text_map.keys()) and value not in translate:
level_1_text.append(self._text_map[value])
elif value in translate:
if value == 'All':
text = self._specify_base(i, text_keys[axis], bases)
else:
text = self._transl[tk_transl][value]
if value in c_text:
add_text = c_text[value].pop(0)
text = '{} {}'.format(text, add_text)
level_1_text.append(text)
elif value == 'All (eff.)':
text = self._specify_base(i, text_keys[axis], bases)
level_1_text.append(text)
else:
if any(self.array_style == a and axis == x for a, x in ((0, 'x'), (1, 'y'))):
text = self._get_text(value, text_keys[axis], True)
level_1_text.append(text)
else:
try:
values = self._get_values(levels[0][i])
if not values:
level_1_text.append(value)
else:
for item in self._get_values(levels[0][i]):
if int(value) == item['value']:
text = self._get_text(item, text_keys[axis])
level_1_text.append(text)
except (ValueError, UnboundLocalError):
if self._grp_text_map:
for gtm in self._grp_text_map:
if value in list(gtm.keys()):
text = self._get_text(gtm[value], text_keys[axis])
level_1_text.append(text)
return list(map(str, level_1_text))
@staticmethod
def _unwgt_label(views, base_vk):
valid = ['cbase', 'cbase_gross', 'rbase', 'ebase']
basetype = base_vk.split('|')[-1]
views_split = [v.split('|') for v in views]
multibase = len([v for v in views_split if v[-1] == basetype]) > 1
weighted = base_vk.split('|')[-2]
w_diff = len([v for v in views_split
if not v[-1] in valid and not v[-2] == weighted]) > 0
if weighted:
return False
elif multibase or w_diff:
return True
else:
return False
def _add_base_text(self, base_val, tk, bases):
if self._array_style == 0 and bases != 'all':
return base_val
else:
bt = self.base_descriptions
if isinstance(bt, dict):
bt_by_key = bt[tk]
else:
bt_by_key = bt
if bt_by_key:
if bt_by_key.startswith('%s:' % base_val):
bt_by_key = bt_by_key.replace('%s:' % base_val, '')
return '{}: {}'.format(base_val, bt_by_key)
else:
return base_val
def _specify_base(self, view_idx, tk, bases):
tk_transl = tk if tk in self._transl else self._default_text
base_vk = self._valid_views()[view_idx]
basetype = base_vk.split('|')[-1]
unwgt_label = self._unwgt_label(list(self._views.keys()), base_vk)
if unwgt_label:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['no_w_gross_All']
elif basetype == 'ebase':
base_value = 'Unweighted effective base'
else:
base_value = self._transl[tk_transl]['no_w_All']
else:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['gross All']
elif basetype == 'ebase':
base_value = 'Effective base'
elif not bases or (bases == 'simple-no-items' and self._is_mask_item):
base_value = self._transl[tk_transl]['All']
else:
key = tk
if isinstance(tk, tuple):
_, key = tk
base_value = self._add_base_text(self._transl[tk_transl]['All'],
key, bases)
return base_value
def _get_text(self, value, text_key, item_text=False):
"""
"""
if value in list(self._meta['columns'].keys()):
col = self._meta['columns'][value]
if item_text and col.get('parent'):
parent = list(col['parent'].keys())[0].split('@')[-1]
items = self._meta['masks'][parent]['items']
for i in items:
if i['source'].split('@')[-1] == value:
obj = i['text']
break
else:
obj = col['text']
elif value in list(self._meta['masks'].keys()):
obj = self._meta['masks'][value]['text']
elif 'text' in value:
obj = value['text']
else:
obj = value
return self._get_text_from_key(obj, text_key)
def _get_text_from_key(self, text, text_key):
""" Find the first value in a meta object's "text" key that matches a
        text_key for its axis.
"""
if isinstance(text_key, tuple):
loc, key = text_key
if loc in text:
if key in text[loc]:
return text[loc][key]
elif self._default_text in text[loc]:
return text[loc][self._default_text]
if key in text:
return text[key]
for key in (text_key, self._default_text):
if key in text:
return text[key]
return '<label>'
def _get_values(self, column):
""" Returns values from self._meta["columns"] or
self._meta["lib"]["values"][<mask name>] if parent is "array"
"""
if column in self._meta['columns']:
values = self._meta['columns'][column].get('values', [])
elif column in self._meta['masks']:
values = self._meta['lib']['values'].get(column, [])
if isinstance(values, str):
keys = values.split('@')
values = self._meta[keys.pop(0)]
while keys:
values = values[keys.pop(0)]
return values
def _add_view_level(self, shorten=False):
""" Insert a third Index level containing View keys into the DataFrame.
"""
vnames = self._views_per_rows()
if shorten:
vnames = [v.split('|')[-1] for v in vnames]
self._frame['View'] = pd.Series(vnames, index=self._frame.index)
self._frame.set_index('View', append=True, inplace=True)
def toggle_labels(self):
""" Restore the unpainted/ painted Index, Columns appearance.
"""
if self.painted:
self.painted = False
else:
self.painted = True
attrs = ['index', 'columns']
if self.structure is not None:
attrs.append('_frame_values')
for attr in attrs:
vals = attr[6:] if attr.startswith('_frame') else attr
frame_val = getattr(self._frame, vals)
setattr(self._frame, attr, getattr(self, attr))
setattr(self, attr, frame_val)
if self.structure is not None:
values = self._frame.values
self._frame.loc[:, :] = self.frame_values
self.frame_values = values
return self
@staticmethod
def _single_column(*levels):
""" Returns True if multiindex level 0 has one unique value
"""
return all(len(level) == 1 for level in levels)
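    # Interleave the frames of a grouped view bundle so that the cell items of
    # each row end up next to each other; 'reduced' grouping afterwards strips
    # the duplicated index labels via _reduce_grouped_index.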
def _group_views(self, frame, group_type):
""" Re-sort rows so that they appear as being grouped inside the
Chain.dataframe.
"""
grouped_frame = []
len_of_frame = len(frame)
frame = pd.concat(frame, axis=0)
index_order = frame.index.get_level_values(1).tolist()
index_order = index_order[:int(len(index_order) / len_of_frame)]
gb_df = frame.groupby(level=1, sort=False)
for i in index_order:
grouped_df = gb_df.get_group(i)
if group_type == 'reduced':
grouped_df = self._reduce_grouped_index(grouped_df, len_of_frame-1)
grouped_frame.append(grouped_df)
grouped_frame =
|
pd.concat(grouped_frame, verify_integrity=False)
|
pandas.concat
|
import geopandas as gpd
import networkx as nx
import pandas as pd
import shapely
from shapely.ops import cascaded_union
from syspy.spatial import polygons, spatial
from syspy.syspy_utils import neighbors, pandas_utils, syscolors
from tqdm import tqdm
def compute_coverage_layer(layer, buffer, extensive_cols=[]):
"""
From a given GeoDataFrame layer and a shapely 2D geometry buffer, computes the coverage layer,
    i.e. the GeoDataFrame of the layer's entities clipped to the buffer geometry.
Inputs:
- layer: a GeoDataFrame object
- buffer: a shapely Polygon or MultiPolygon
    - extensive_cols: a subset of columns whose values are extensive and have to be recomputed
        for the new layer (for instance the population of the zone)
Outputs:
a GeoDataFrame with the same columns as the input layer, but different geometry and extensive_cols
"""
    # Intersect each entity's geometry with the buffer
layer_in_buffer = layer.copy()
layer_in_buffer['geometry_intersect'] = layer_in_buffer.intersection(buffer)
# Explode the multipolygons in polygons
layer_in_buffer['geometries'] = layer_in_buffer['geometry_intersect'].apply(
lambda x: x.geoms if x.type == 'MultiPolygon' else [x]
)
layer_in_buffer_exploded = pandas_utils.df_explode(layer_in_buffer, 'geometries')
# Compute intersection area
layer_in_buffer_exploded['area_intersected'] = gpd.GeoSeries(layer_in_buffer_exploded['geometries']).area
# Drop row with null areas
layer_in_buffer_exploded.drop(
layer_in_buffer_exploded[layer_in_buffer_exploded['area_intersected'] == 0].index,
inplace=True
)
# Recompute extensive columns values
for col in extensive_cols:
layer_in_buffer_exploded[col] = layer_in_buffer_exploded.apply(
lambda x: x[col] * x['geometries'].area / x['geometry'].area, 1
)
layer_in_buffer_exploded.drop(['geometry', 'geometry_intersect', 'area_intersected'], 1, inplace=True)
layer_in_buffer_exploded.rename(columns={'geometries': 'geometry'}, inplace=True)
layer_in_buffer_exploded = gpd.GeoDataFrame(layer_in_buffer_exploded)
return layer_in_buffer_exploded
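# Illustrative call (a sketch; 'zones.geojson' and the 'population' column are
# hypothetical inputs, not files shipped with this module):
#
#     zones = gpd.read_file('zones.geojson')
#     buffer = zones.unary_union.centroid.buffer(5000)
#     covered = compute_coverage_layer(zones, buffer, extensive_cols=['population'])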
def merge_zonings(background, foreground, min_area_factor=0.01, min_area=None):
back = background.copy()
front = foreground.copy()
stencil = shapely.geometry.MultiPolygon(
list(front['geometry'])
).buffer(1e-9)
back['geometry'] = back['geometry'].apply(lambda g: g.difference(stencil))
back['geometry'] = polygons.biggest_polygons(list(back['geometry']))
back['area'] = [g.area for g in back['geometry']]
min_area = min_area if min_area else back['area'].mean() * min_area_factor
back = back.loc[back['area'] > min_area]
back['id'] = back.index
front['id'] = front.index
back['zoning'] = 'back'
front['zoning'] = 'front'
columns = ['zoning', 'id', 'geometry']
concatenated = pd.concat(
[back[columns], front[columns]]
)
df = concatenated
zones = list(df['geometry'])
clean_zones = polygons.clean_zoning(
zones,
buffer=1e-4,
fill_buffer=2e-3,
fill_gaps=False,
unite_gaps=True
)
df['geometry'] = clean_zones
return df.reset_index(drop=True)
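# Order the ids in `pool` as a chain: start from the first id, then repeatedly
# append the next remaining id whose geometry intersects the current one.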
def pool_and_geometries(pool, geometries):
done = []
while len(pool):
# start another snail
done.append(pool[0])
current = geometries[pool[0]]
pool = [p for p in pool if p not in done]
for i in range(len(pool)):
for p in pool:
if geometries[p].intersects(current):
done.append(p)
current = geometries[p]
pool = [p for p in pool if p not in done]
break
return done
def snail_number(zones, center, distance_to='zone'):
if distance_to == 'zone':
distance_series = zones['geometry'].apply(lambda g: center.distance(g))
elif distance_to == 'centroid':
distance_series = zones['geometry'].apply(lambda g: center.distance(g.centroid))
distance_series.name = 'cluster_distance'
distance_series.sort_values(inplace=True)
geometries = zones['geometry'].to_dict()
pool = list(distance_series.index)
done = pool_and_geometries(pool, geometries)
snail =
|
pd.Series(done)
|
pandas.Series
|
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
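# Regression tests for hhkit.rr (recode/replace): column values are replaced
# according to a mapping dict, optionally restricted to the rows flagged True
# in an `include` boolean Series.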
class Testrr(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df_master = pd.DataFrame(
{'educ': {0: 'pri', 1: 'bach', 2: 'pri', 3: 'hi', 4: 'bach', 5: 'sec',
6: 'hi', 7: 'hi', 8: 'pri', 9: 'pri'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.df_using_hh = pd.DataFrame(
{'hh': {0: 2, 1: 4, 2: 5, 3: 6, 4: 7},
'has_fence': {0: 1, 1: 0, 2: 1, 3: 1, 4: 0}
})
self.df_using_ind = pd.DataFrame(
{'empl': {0: 'ue', 1: 'ft', 2: 'pt', 3: 'pt', 4: 'ft', 5: 'pt',
6: 'se', 7: 'ft', 8: 'se'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 5, 5: 5, 6: 4, 7: 4, 8: 4},
'id': {0: 1, 1: 2, 2: 4, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 5}
})
# @unittest.skip("demonstrating skipping")
def test_rr_replaces_values_correctly_not_using_include(self):
myhhkit = hhkit(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
myhhkit.statamerge(myhhkit_using_hh, on=['hh'], mergevarname='_merge_hh')
myhhkit.rr('educ',{'pri':'primary','sec':'secondary','hi':'higher education','bach':'bachelor'})
myhhkit.rr('has_fence', {0:2,1:np.nan,np.nan:-1})
myhhkit.rr('has_car', {0:1,1:0,np.nan:-9})
correct_values_has_fence = pd.Series([-1,-1,-1,np.nan,-1,-1,2,2,2,2,np.nan,np.nan,2],index=np.arange(13)).astype(float)
correct_values_has_car = pd.Series([0,0,0,0,1,1,0,0,0,0,-9,-9,-9],index=np.arange(13)).astype(float)
correct_values_educ = pd.Series(['primary','bachelor','primary','higher education','bachelor',
'secondary','higher education','higher education','primary','primary','nan','nan','nan'],
index=np.arange(13))
assert_series_equal(correct_values_has_car, myhhkit.df['has_car'])
assert_series_equal(correct_values_has_fence, myhhkit.df['has_fence'])
assert_series_equal(correct_values_has_fence, myhhkit.df['has_fence'])
# @unittest.skip("demonstrating skipping")
def test_rr_replaces_values_correctly_using_include(self):
myhhkit = hhkit(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
myhhkit.statamerge(myhhkit_using_hh, on=['hh'], mergevarname='_merge_hh')
include = pd.Series([True, False, True, False, True, True, False, True, True, True, False, True, False],
index=np.arange(13))
myhhkit.rr('educ',{'pri':'primary','sec':'secondary','hi':'higher education','bach':'bachelor'}, include=include)
myhhkit.rr('has_fence', {0:2,1:np.nan,np.nan:-1}, include=include)
myhhkit.rr('has_car', {0:1,1:0,np.nan:-9}, include=include)
correct_values_has_fence = pd.Series([-1,np.nan,-1,1,-1,-1,0,2,2,2,1,np.nan,0],
index=np.arange(13)).astype(float)
correct_values_has_car = pd.Series([0,1,0,1,1,1,1,0,0,0,np.nan,-9,np.nan],
index=np.arange(13)).astype(float)
correct_values_educ = pd.Series(['primary','bach','primary','hi','bachelor',
'secondary','hi','higher education','primary','primary',np.nan,np.nan,np.nan],
index=np.arange(13))
assert_series_equal(correct_values_has_car, myhhkit.df['has_car'])
|
assert_series_equal(correct_values_has_fence, myhhkit.df['has_fence'])
|
pandas.util.testing.assert_series_equal
|
import os
from numbers import Integral
import pandas as pd
import polars as pl
import pyarrow as pa
from featherstore._metadata import METADATA_FOLDER_NAME, Metadata
from featherstore._table import common
from featherstore._table import _table_utils
def table_not_exists(table_path):
table_name = table_path.rsplit('/')[-1]
if not os.path.exists(table_path):
raise FileNotFoundError(f"Table '{table_name}' doesn't exist")
def table_already_exists(table_path):
table_name = table_path.rsplit('/')[-1]
if os.path.exists(table_path):
raise OSError(f"A table with name '{table_name}' already exists")
def table_name_is_not_str(table_name):
if not isinstance(table_name, str):
raise TypeError(
f"'table_name' must be a str (is type {type(table_name)})")
def table_name_is_forbidden(table_name):
if table_name == METADATA_FOLDER_NAME:
raise ValueError(f"Table name '{METADATA_FOLDER_NAME}' is forbidden")
def df_is_not_supported_table_dtype(df):
if not isinstance(df, (pd.DataFrame, pd.Series, pl.DataFrame, pa.Table)):
raise TypeError(f"'df' must be a supported DataFrame dtype (is type {type(df)})")
def df_is_not_pandas_table(df):
if not isinstance(df, (pd.DataFrame, pd.Series)):
raise TypeError(
f"'df' must be a pd.DataFrame or pd.Series (is type {type(df)})")
def to_argument_is_not_list(to):
is_valid_col_format = isinstance(to, list)
if not is_valid_col_format:
raise TypeError(f"'to' must be of type list (is type {type(to)})")
def cols_argument_is_not_list_or_none(cols):
is_valid_col_format = isinstance(cols, (list, type(None)))
if not is_valid_col_format:
raise TypeError(f"'cols' must be either list or None (is type {type(cols)})")
def cols_argument_is_not_list_or_dict(cols):
is_valid_col_format = isinstance(cols, (list, dict))
if not is_valid_col_format:
raise TypeError(f"'cols' must be either list or dict (is type {type(cols)})")
def cols_argument_items_is_not_str(cols):
if isinstance(cols, dict):
col_elements_are_str = all(isinstance(item, str) for item in cols.keys())
else:
col_elements_are_str = all(isinstance(item, str) for item in cols)
if not col_elements_are_str:
raise TypeError("Elements in 'cols' must be of type str")
def cols_does_not_match(df, table_path):
stored_data_cols = Metadata(table_path, "table")["columns"]
has_default_index = Metadata(table_path, "table")["has_default_index"]
new_data_cols = _table_utils.get_col_names(df, has_default_index)
if sorted(new_data_cols) != sorted(stored_data_cols):
raise ValueError("New and old columns doesn't match")
def cols_not_in_table(cols, table_path):
table_metadata = Metadata(table_path, 'table')
stored_cols = table_metadata["columns"]
cols = common.filter_cols_if_like_provided(cols, stored_cols)
some_cols_not_in_stored_cols = set(cols) - set(stored_cols)
if some_cols_not_in_stored_cols:
raise IndexError("Trying to access a column not found in table")
def rows_argument_is_not_supported_dtype(rows):
is_valid_row_format = isinstance(rows, (list, pd.Index, type(None)))
if not is_valid_row_format:
raise TypeError(f"'rows' must be either List, pd.Index or None (is type {type(rows)})")
def rows_argument_items_dtype_not_same_as_index(rows, table_path):
index_dtype = Metadata(table_path, "table")["index_dtype"]
if rows is not None and not _rows_dtype_matches_index(rows, index_dtype):
raise TypeError("'rows' dtype doesn't match table index dtype")
def _rows_dtype_matches_index(rows, index_dtype):
row = rows[-1]
matches_dtime_idx = _check_if_row_and_index_is_temporal(row, index_dtype)
matches_str_idx = _check_if_row_and_index_is_str(row, index_dtype)
matches_int_idx = _check_if_row_and_index_is_int(row, index_dtype)
row_type_matches_idx = matches_dtime_idx or matches_str_idx or matches_int_idx
return row_type_matches_idx
def _check_if_row_and_index_is_temporal(row, index_dtype):
if _table_utils.str_is_temporal_dtype(index_dtype):
return _isinstance_temporal(row)
return False
def _check_if_row_and_index_is_str(row, index_dtype):
if _table_utils.str_is_string_dtype(index_dtype):
return _isinstance_str(row)
return False
def _check_if_row_and_index_is_int(row, index_dtype):
if _table_utils.str_is_int_dtype(index_dtype):
return _isinstance_int(row)
return False
def _isinstance_temporal(obj):
try:
_ =
|
pd.to_datetime(obj)
|
pandas.to_datetime
|
import pandas as pd
import datetime
import os
def prepare_data(time0, time9, symbol, fgCov=False, prep_new=True, mode='test'):
path = '/home/ubuntu/backtrader-binance-futures/Data/binance/futures/'
df9path = f'../data/{symbol}_1m_{mode}.csv'
if prep_new:
time0 =
|
pd.to_datetime(time0)
|
pandas.to_datetime
|
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
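# These tests pin the expected output by comparing the summed
# pd.util.hash_pandas_object of the resulting DataFrame, so any change in
# parsing or dtypes surfaces as a hash mismatch.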
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
"""
Check if a csv w/ a timestamp is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp.csv')
element_id = 'tagID'
timestamp = 'timestamp'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554
def test_timestamp_ba(self):
"""
Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
element_id = 'tagID'
timestamp = 'timestamp'
boardings = 'boardings'
alightings = 'alightings'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651
def test_session(self):
"""
Check if a csv w/ session times is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7098407329788286247
def test_session_ba(self):
"""
Check if a csv w/ session times and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session_ba.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
boardings = 'boardings'
alightings = 'alightings'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 2589903708124850504
class TestStandardizeDatetime:
"""
Tests ensuring all times are datetime format
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already datetime and no change is needed
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] =
|
pd.to_datetime(test_df['timestamp'])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
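# Shared parser test mixin: concrete subclasses bind self.read_csv/
# self.read_table to either the C or the Python engine and provide the
# sample csv fixtures (self.csv1, self.csv2, self.csv_shiftjs, ...).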
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(
|
StringIO(self.data1)
|
pandas.compat.StringIO
|
"""
This script generates features for each author based on the 2 graphs in the dataset:
- the weighted graph
- the unweighted graph
for each node, it calculates the following features:
1- degree in the unweighted graph
2- degree in the weighted graph
3- core number in the unweighted graph
4- core number in the weighted graph
5- the centrality in the unweighted graph
6- the centrality in the weighted graph
7- log10 of the pagerank in the unweighted graph
8- log10 of the pagerank in the weighted graph
9- spectral clustering coefficient in the unweighted graph
10- clustering coefficient in the unweighted graph
11- betweeness coefficient in the unweighted graph
12- mean of the neighbours degree in unweighted graph
13- max of the neighbours degree in unweighted graph
14- min of the neighbours degree in unweighted graph
15- mean of the neighbours degree in weighted graph
16- max of the neighbours degree in weighted graph
17- min of the neighbours degree in weighted graph
18- mean of the neighbours page rank in unweighted graph
19- max of the neighbours page rank in unweighted graph
20- min of the neighbours page rank in unweighted graph
21- mean of the neighbours page rank in weighted graph
22- max of the neighbours page rank in weighted graph
23- min of the neighbours page rank in weighted graph
It takes as input two files:
- collaboration_network.edgelist
- new_weighted.edgelist
It outputs one file:
- graph_features.csv
"""
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from scipy.sparse.linalg import eigs
from scipy import sparse
from random import randint
from sklearn.cluster import KMeans
def spectral_clustering(G, k):
"""
:param G: networkx graph
:param k: number of clusters
:return: clustering labels, dictionary of nodeID: clustering_id
"""
A = nx.adjacency_matrix(G)
d = np.array([G.degree(node) for node in G.nodes()])
D_inv = sparse.diags(1 / d)
n = len(d) # number of nodes
L_rw = sparse.eye(n) - D_inv @ A
eig_values, eig_vectors = eigs(L_rw, k, which='SR')
eig_vectors = eig_vectors.real
kmeans = KMeans(n_clusters=k)
kmeans.fit(eig_vectors)
clustering_labels = {node: kmeans.labels_[i] for i, node in enumerate(G.nodes())}
return clustering_labels
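# Hedged usage sketch (added for illustration; not part of the original pipeline).
# The helper below is never called by this script, and the demo graph and k value
# are assumptions chosen only to show how spectral_clustering is meant to be used.
def _demo_spectral_clustering(k=2):
    demo_graph = nx.karate_club_graph()  # small example graph bundled with networkx
    labels = spectral_clustering(demo_graph, k)  # dict of nodeID -> cluster id
    return labels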
# load the graph
G = nx.read_edgelist('../data/collaboration_network.edgelist', delimiter=' ', nodetype=int)
WG = nx.read_edgelist('../data/denser_graph.edgelist', nodetype=int, data=(("weight", float), ))
n_nodes = G.number_of_nodes()
n_edges = G.number_of_edges()
print('Number of nodes:', n_nodes)
print('Number of edges:', n_edges)
# computes structural features for each node
core_number = nx.core_number(G)
core_number_w = nx.core_number(WG)
spectral_c = spectral_clustering(G, 10)
clustering_coef = nx.clustering(WG)
betweeness_coef = nx.betweenness_centrality(G, k=256)
centrality = nx.eigenvector_centrality(G)
centrality_w = nx.eigenvector_centrality(WG)
print("Centrality measures generated")
# papers count dictionary, for each author how many papers does he wrote.
df_papers =
|
pd.read_csv('../features/number_of_papers_features.csv', dtype={'authorID': str})
|
pandas.read_csv
|
import cantera as ct
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
import pandas as pd
import numpy as np
import time
import copy
import re
import MSI.simulations.instruments.shock_tube as st
import time
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
class flow_reactor(sim.Simulation):
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,thermalBoundary,
mechanicalBoundary,
processor:ctp.Processor=None,cti_path="",
save_physSensHistories=0,moleFractionObservables:list=[],
concentrationObservables:list=[],
fullParsedYamlFile:dict={}, save_timeHistories:int=0,
log_file=False,log_name='log.txt',timeshift:float=0.0,initialTime:float=0.0,
residenceTime:float=1.0):
'''
Contains methods and objects to run a single flow reactor.
Parameters
----------
pressure : float
Pressure in [atm].
temperature : float
Temperature in [K].
observables : list
Species which sensitivity analysis is performed for.
kineticSens : int
0 for off, 1 for on.
physicalSens : int
0 for off, 1 for on.
conditions : dict
Initial mole fractions for species in simulation.
thermalBoundary : str
            Thermal boundary condition inside the reactor; the reactor can
either be adiabatic or isothermal.
mechanicalBoundary : str
            Mechanical boundary condition inside the reactor; the reactor can
either be constant pressure or constant volume.
processor : ctp.Processor, optional
Loaded cti file. The default is None.
cti_path : TYPE, optional
Path of cti file for running. If processor is provided this is not
needed. The default is "".
save_physSensHistories : Bool, optional
Boolean variable describing if physical sensitivity time histories
are saved. 0 for not saved, 1 for saved. The default is 0.
moleFractionObservables : list, optional
Species for which experimental data in the form of mole fraction
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
concentrationObservables : list, optional
Species for which experimental data in the form of concentration
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
fullParsedYamlFile : dict, optional
Full dictionary from the parsed shock tube yaml file.
The default is {}.
save_timeHistories : int, optional
Boolean variable describing if time histories for simulation runs
are saved. 0 for not saved, 1 for saved. The default is 0.
log_file : bool, optional
If True the simulation will write out a log file for sensitivity.
The default is False.
log_name : str, optional
Log file name. The default is 'log.txt'.
timeshift : float, optional
The numerical value by which the time vector of the simulation
is shifted in seconds. The default is 0.
initialTime : float, optional
Time to begin simulation from (s).
residenceTime : float, optional
The time which the reactor will be run until. The default is 1.0.
Returns
-------
None.
'''
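        # Illustrative construction sketch (comment only; every value below is an
        # assumption, not taken from the original code):
        #   fr = flow_reactor(pressure=1.0, temperature=1000.0, observables=['OH'],
        #                     kineticSens=1, physicalSens=0,
        #                     conditions={'H2': 0.01, 'O2': 0.01, 'Ar': 0.98},
        #                     thermalBoundary='isothermal',
        #                     mechanicalBoundary='constant pressure',
        #                     processor=some_loaded_processor, residenceTime=1.2)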
if processor!=None and cti_path!="":
print("Error: Cannot give both a processor and a cti file path, pick one")
elif processor==None and cti_path=="":
print("Error: Must give either a processor or a cti file path")
if processor != None:
self.processor = processor
elif cti_path!="":
self.processor = ctp.Processor(cti_path)
self.pressure=pressure
self.temperature=temperature
self.observables=observables
self.kineticSens=kineticSens
self.physicalSens=physicalSens
self.conditions=conditions
self.cti_path=cti_path
self.thermalBoundary = thermalBoundary
self.mechanicalBoundary=mechanicalBoundary
self.kineticSensitivities= None
self.experimentalData = None
self.concentrationObservables = concentrationObservables
self.moleFractionObservables = moleFractionObservables
self.fullParsedYamlFile = fullParsedYamlFile
#self.energycon='off'
self.timeshift=timeshift
self.timeHistory = None
self.experimentalData = None
self.initialTime=initialTime
self.residenceTime = residenceTime
self.finalTime = self.timeshift + self.residenceTime
self.log_name=log_name
self.log_file=log_file
#self.yaml_file=yaml_file
if save_timeHistories == 1:
self.timeHistories=[]
self.timeHistoryInterpToExperiment = None
self.pressureAndTemperatureToExperiment = None
else:
self.timeHistories=None
if save_physSensHistories == 1:
self.physSensHistories = []
self.setTPX()
self.dk = 0.01
self.solution=None
def run_shocktube(self,ksens_marker:int=1 ,psens_marker:int=1):
'''
Function calls and runs a shock tube simulation with the appropriate
ksens_marker and psens_marker depending on the situation.
Parameters
----------
ksens_marker : int, optional
If 1 kinetic sensitivity on, if 0 off.
The default is 1.
psens_marker : TYPE, optional
If 1 physical sensitivity on, if 0 off.
The default is 1.
Returns
-------
shock_tube : shock_tube_object
Shock tube simulation and all variables, functions and
object it contains.
'''
if ksens_marker ==0 and psens_marker==0:
shock_tube = st.shockTube(pressure =self.pressure,
temperature = self.temperature,
observables = self.observables,
kineticSens = 0,
physicalSens = 0,
conditions = self.conditions,
initialTime = self.initialTime,
finalTime = self.finalTime,
thermalBoundary = self.thermalBoundary,
mechanicalBoundary = self.mechanicalBoundary,
processor = self.processor,
save_timeHistories = 1,
save_physSensHistories = 0,
moleFractionObservables = self.moleFractionObservables,
concentrationObservables = self.concentrationObservables,
fullParsedYamlFile = self.fullParsedYamlFile,
time_shift_value = self.timeshift)
shock_tube.run()
return shock_tube
elif ksens_marker ==1 and psens_marker==0:
shock_tube = st.shockTube(pressure =self.pressure,
temperature = self.temperature,
observables = self.observables,
kineticSens = 1,
physicalSens = 0,
conditions = self.conditions,
initialTime = self.initialTime,
finalTime = self.finalTime,
thermalBoundary = self.thermalBoundary,
mechanicalBoundary = self.mechanicalBoundary,
processor = self.processor,
save_timeHistories = 1,
save_physSensHistories = 0,
moleFractionObservables = self.moleFractionObservables,
concentrationObservables = self.concentrationObservables,
fullParsedYamlFile = self.fullParsedYamlFile,
time_shift_value = self.timeshift)
shock_tube.run()
return shock_tube
elif ksens_marker ==0 and psens_marker==1:
shock_tube = st.shockTube(pressure =self.pressure,
temperature = self.temperature,
observables = self.observables,
kineticSens = 0,
physicalSens = 1,
conditions = self.conditions,
initialTime = self.initialTime,
finalTime = self.finalTime,
thermalBoundary = self.thermalBoundary,
mechanicalBoundary = self.mechanicalBoundary,
processor = self.processor,
save_timeHistories = 1,
save_physSensHistories = 0,
moleFractionObservables = self.moleFractionObservables,
concentrationObservables = self.concentrationObservables,
fullParsedYamlFile = self.fullParsedYamlFile,
time_shift_value = self.timeshift)
shock_tube.run()
return shock_tube
elif ksens_marker ==1 and psens_marker==1:
shock_tube = st.shockTube(pressure =self.pressure,
temperature = self.temperature,
observables = self.observables,
kineticSens = 1,
physicalSens = 1,
conditions = self.conditions,
initialTime = self.initialTime,
finalTime = self.finalTime,
thermalBoundary = self.thermalBoundary,
mechanicalBoundary = self.mechanicalBoundary,
processor = self.processor,
save_timeHistories = 1,
save_physSensHistories = 0,
moleFractionObservables = self.moleFractionObservables,
concentrationObservables = self.concentrationObservables,
fullParsedYamlFile = self.fullParsedYamlFile,
time_shift_value = self.timeshift)
shock_tube.run()
return shock_tube
def run_single(self,ksens_marker:int=1,psens_marker:int=1):
'''
Runs either a single temperature, pressure or species set for a flow
reactor.
Parameters
----------
ksens_marker : int, optional
If 1 kinetic sensitivity on, if 0 off. The default is 1.
psens_marker : int, optional
If 1 physical sensitivity on, if 0 off.. The default is 1.
Returns
-------
res_time_measurment : Pandas Data Frame
Pandas Data Frame for either a single pressure, temperature,
or species set containing reactor results.
kineticSensitivities: numpy array
Array containing kinetic sensitivities for either a single
pressure, temperature or species set.
timehistory: Pandas Data Frame
Pandas data frame containing data for full time history of either
a single pressure, temperature, or species set.
temp_arrays
Variable for testing.
'''
if self.kineticSens:
s = self.run_shocktube(ksens_marker=1,psens_marker=0)
self.timehistory=copy.deepcopy(s.timeHistory)
res_time_measurment=None
res_time_measurment,index,initial_temp = self.get_res_time_data(self.timehistory,self.finalTime)
#print(s.kineticSensitivities.shape)
#ksens = s.kineticSensitivities[-1,:,:]
ksens,temp_arrays = self.get_ksens_at_res_time(s.kineticSensitivities,self.timehistory['time'],self.finalTime)
#ksens = s.kineticSensitivities[index,:,:]
#xdim = s.kineticSensitivities.shape[0]
#ydim = s.kineticSensitivities.shape[1]
#zdim = s.kineticSensitivities.shape[2]
#ksens = ksens.reshape((1,ydim,zdim))
self.kineticSensitivities = ksens
elif ksens_marker==0 and psens_marker==1:
s = self.run_shocktube(ksens_marker=0,psens_marker=1)
self.timehistory=copy.deepcopy(s.timeHistory)
res_time_measurment=None
res_time_measurment,index,initial_temp = self.get_res_time_data(self.timehistory,self.finalTime)
else:
s = self.run_shocktube(ksens_marker=0,psens_marker=0)
self.timehistory=copy.deepcopy(s.timeHistory)
res_time_measurment=None
res_time_measurment,index,initial_temp = self.get_res_time_data(self.timehistory,self.finalTime)
if self.kineticSens:
return res_time_measurment,self.kineticSensitivities,self.timehistory,temp_arrays
else:
return res_time_measurment,[],None,None
def get_ksens_at_res_time(self,ksens,time_array,res_time):
'''
Helper function that takes the full time history of kinetic
sensitivities and returns the data at the time step for which
        the residence time occurs, using linear interpolation if needed.
Parameters
----------
ksens : numpy array
Three dimensional numpy array that contains kinetic sensitivities.
time_array : pandas series
Time column of time history pandas data frame.
res_time : float
Residence time value.
Returns
-------
ksens_array : numpy array
kinetic sensitivity array where all times but the residence time
have been removed.
temp_arrays : numpy array
Variable for testing.
'''
ksens_array = []
temp_arrays = []
for sheet in range(ksens.shape[2]):
temp = ksens[:,:,sheet]
time=time_array.values
time=time.reshape((time.shape[0],1))
temp_with_time = np.hstack((time,temp))
df =copy.deepcopy(temp_with_time)
df = pd.DataFrame(temp_with_time)
df=df.rename(columns = {0:'time'})
temp_arrays.append(df)
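            # Insert a row at t = res_time, sort by time, and linearly interpolate so the
            # sensitivities at the residence time can be read off even if that exact time
            # step was never simulated (assumes the time column increases monotonically).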
df.loc[-1, 'time'] = float(res_time)
df = df.sort_values('time').reset_index(drop=True)
df = df.interpolate()
res_time_k_sens_data = df.iloc[(df['time']-res_time).abs().argsort()[:1]]
res_time_k_sens_data = res_time_k_sens_data.reset_index(drop=True)
res_time_k_sens_data = res_time_k_sens_data.drop(columns="time")
res_time_k_sens_data = res_time_k_sens_data.to_numpy()
res_time_k_sens_data = res_time_k_sens_data.reshape((res_time_k_sens_data.shape[0],res_time_k_sens_data.shape[1],1))
ksens_array.append(res_time_k_sens_data)
ksens_array = np.dstack((ksens_array))
return ksens_array,temp_arrays
def get_res_time_data(self,data,res_time):
'''
Helper function that takes the full time history of species, pressure
and temperature data and returns the data at the time step for which
        the residence time occurs, using linear interpolation if needed.
Parameters
----------
data : Pandas Data Frame
Pandas Data Frame containing the time history for the reactor.
res_time : float
Residence time.
Returns
-------
res_time_data : Pandas Data Frame
Time history data at the residence time.
index : int
            Index at which the residence time occurs.
initial_temp : float
Initial temperature the simulation starts at.
'''
#res_time_data = data.tail(1)
#res_time_data = res_time_data.reset_index(drop=True)
#print(res_time)
#reset index
df = copy.deepcopy(data)
initial_temp=df.head(1)['temperature']
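        # Same interpolation trick as in get_ksens_at_res_time: append a row at
        # t = res_time, sort, interpolate, then pick the row closest to the residence time.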
df.loc[-1, 'time'] = float(res_time)
df = df.sort_values('time').reset_index(drop=True)
df = df.interpolate()
res_time_data = df.iloc[(df['time']-res_time).abs().argsort()[:1]]
res_time_data = res_time_data.reset_index(drop=True)
res_time_data['initial_temperature'] = initial_temp
index = df.iloc[(df['time']-res_time).abs().argsort()[:1]].index.values[0]
return res_time_data,index,initial_temp
def sensitivityCalculation(self,originalValues,newValues,dk=.01):
'''
Function to calculate the log log sensitivity of two pandas
data frames.
Parameters
----------
originalValues : numpy array
Original results of variable sensitivity is being calculated for.
newValues : numpy array
Perturbed results of variable sensitivity is being calculated for.
dk : float, optional
Percent as a decimal by which the new values were perturbed.
The default is .01.
Returns
-------
sensitivity : numpy array
Calculated sensitivity.
'''
sensitivity=(np.log(newValues)-np.log(originalValues))/dk
return sensitivity
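# Hedged worked example (added for illustration; the numbers are assumptions, not data
# from any simulation): a 1 % perturbation (dk = 0.01) that moves an observable from
# 2.00 to 2.02 gives (ln(2.02) - ln(2.00)) / 0.01 ~ 0.995, a near-unity log sensitivity.
def _demo_log_log_sensitivity():
    original = np.array([2.00])
    perturbed = np.array([2.02])
    return (np.log(perturbed) - np.log(original)) / 0.01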
class flow_reactor_wrapper(sim.Simulation):
def __init__(self,pressure:float,temperatures:list,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,thermalBoundary,
mechanicalBoundary,
processor:ctp.Processor=None,cti_path="",
save_physSensHistories=0,moleFractionObservables:list=[],
concentrationObservables:list=[],
fullParsedYamlFile:dict={}, save_timeHistories:int=0,
timeshifts:list=[],initialTime:float=0.0,
residenceTimes:list=1.0):
'''
Contains methods and objects to run a flow reactor for various
temperatures.
Parameters
----------
pressure : float
Pressure in [atm].
temperatures : list
            Temperatures in [K].
observables : list
Species which sensitivity analysis is performed for.
kineticSens : int
0 for off, 1 for on.
physicalSens : int
0 for off, 1 for on.
conditions : dict
Initial mole fractions for species in simulation.
thermalBoundary : str
            Thermal boundary condition inside the reactor; the reactor can
either be adiabatic or isothermal.
mechanicalBoundary : str
            Mechanical boundary condition inside the reactor; the reactor can
either be constant pressure or constant volume.
processor : ctp.Processor, optional
Loaded cti file. The default is None.
cti_path : str, optional
Path of cti file for running. If processor is provided this is not
needed. The default is "".
save_physSensHistories : bool, optional
Boolean variable describing if physical sensitivity time histories
are saved. 0 for not saved, 1 for saved. The default is 0.
moleFractionObservables : list, optional
Species for which experimental data in the form of mole fraction
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
concentrationObservables : list, optional
Species for which experimental data in the form of concentration
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
fullParsedYamlFile : dict, optional
Full dictionary from the parsed shock tube yaml file.
The default is {}.
save_timeHistories : int, optional
Boolean variable describing if time histories for simulation runs
are saved. 0 for not saved, 1 for saved. The default is 0.
        timeshifts : list, optional
The numerical value by which the time vector of the simulation
is shifted in seconds. The default is 0.
initialTime : float, optional
Time to begin simulation from (s).
        residenceTimes : list, optional
            The times which the reactor will be run until. The default is 1.0.
Returns
-------
None.
'''
if processor!=None and cti_path!="":
print("Error: Cannot give both a processor and a cti file path, pick one")
elif processor==None and cti_path=="":
print("Error: Must give either a processor or a cti file path")
if processor != None:
self.processor = processor
elif cti_path!="":
self.processor = ctp.Processor(cti_path)
self.pressure=pressure
self.temperatures=temperatures
self.observables=observables
self.kineticSens=kineticSens
self.physicalSens=physicalSens
self.conditions=conditions
self.cti_path=cti_path
self.thermalBoundary = thermalBoundary
self.mechanicalBoundary=mechanicalBoundary
self.kineticSensitivities= None
self.experimentalData = None
self.concentrationObservables = concentrationObservables
self.moleFractionObservables = moleFractionObservables
self.fullParsedYamlFile = fullParsedYamlFile
#self.energycon='off'
self.timeshifts=timeshifts
self.timeHistory = None
self.experimentalData = None
self.initialTime=initialTime
self.residenceTimes = residenceTimes
self.finalTimes = list(np.array(self.timeshifts) + np.array(self.residenceTimes))
self.save_physSensHistories = save_physSensHistories
self.save_timeHistories = save_timeHistories
#self.yaml_file=yaml_file
if save_timeHistories == 1:
self.timeHistories=[]
self.fullTimeHistories=[]
self.temp_arrays=[]
else:
self.timeHistories=None
if save_physSensHistories == 1:
self.physSensHistories = []
#self.setTPX()
self.dk = [0]
self.solution=None
def run(self,ksens_marker=1,psens_marker=1):
'''
Function to run a flow reactor simulation looping over multiple
temperatures.
Parameters
----------
ksens_marker : int, optional
If 1 kinetic sensitivity on, if 0 off. The default is 1.
psens_marker : int, optional
If 1 physical sensitivity on, if 0 off.. The default is 1.
Returns
-------
solution : Pandas Data Frame
Data frame that contains a temperature history of the reactor.
ksens : numpy array
Numpy array that contains kinetic sensitivities.
'''
solution=[]
ksens=[]
ksens_1stIter=False
#print(self.conditions)
for i in range(len(self.temperatures)):
temp_flow=flow_reactor(pressure=self.pressure,
temperature=self.temperatures[i],
observables=self.observables,
kineticSens=self.kineticSens,
physicalSens=self.physicalSens,
conditions=self.conditions,
thermalBoundary=self.thermalBoundary,
mechanicalBoundary=self.mechanicalBoundary,
processor=self.processor,
cti_path=self.cti_path,
save_physSensHistories=self.save_physSensHistories,
moleFractionObservables=self.moleFractionObservables,
concentrationObservables=self.concentrationObservables,
fullParsedYamlFile=self.fullParsedYamlFile,
save_timeHistories=self.save_timeHistories,
timeshift=self.timeshifts[i],
initialTime=self.initialTime,
residenceTime=self.residenceTimes[i])
#res_time_data,k_sens=temp_flow.run_single(ksens=self.kineticSens,psens=self.physicalSens)
res_time_data,k_sens,fullTimeHistory,temp_array=temp_flow.run_single(ksens_marker=ksens_marker,psens_marker=psens_marker)
if self.kineticSens==1:
self.fullTimeHistories.append(fullTimeHistory)
self.temp_arrays.append(temp_array)
temp=[]
temp1=[]
temp=copy.deepcopy(res_time_data)
#print(temp)
temp1=copy.deepcopy(k_sens)
#print(a)
solution.append(temp)
if not ksens_1stIter and self.kineticSens==1:
ksens=temp1
ksens_1stIter=True
elif self.kineticSens==1 and ksens_1stIter:
ksens=np.vstack([ksens,temp1])
#print(ksens)
solution=pd.concat(solution)
#print(np.shape(ksens))
#print(self.timeHistories)
#print(solution)
if self.timeHistories != None:
self.timeHistories.append(solution)
self.kineticSensitivities=ksens
return (solution,ksens)
def sensitivity_adjustment(self,temp_del:float=0.0,
pres_del:float=0.0,
spec_pair:(str,float)=('',0.0),
res_del:float=0.0):
'''
        Passes the perturbed observable to the setTPX function. Temperature and pressure
        are passed and set directly; species need to go through an additional step in the
        setTPX function.
'''
#this is where we would make the dk fix
if temp_del != 0.0:
self.dk.append(temp_del)
if pres_del != 0.0:
self.dk.append(pres_del)
if spec_pair[1] != 0.0:
self.dk.append(spec_pair[1])
temptemp=copy.deepcopy(self.temperatures)
temppres=copy.deepcopy(self.pressure)
tempcond=copy.deepcopy(self.conditions)
kin_temp = self.kineticSens
self.kineticSens = 0
if spec_pair[0] != '':
self.temperatures=np.array(self.temperatures)+temp_del*np.array(self.temperatures)
#self.pressures=np.array(self.pressures)+pres_del*np.array(self.pressures)
self.pressure=self.pressure+pres_del*self.pressure
xj=self.conditions[spec_pair[0]]
delxj=spec_pair[1]*self.conditions[spec_pair[0]]
#print(xj,delxj)
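            # With the other species' entries left unchanged, setting species j to
            # (x_j + dx_j)(1 - x_j)/(1 - x_j - dx_j) makes its mole fraction exactly
            # x_j + dx_j once the composition is renormalized to sum to one.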
self.conditions[spec_pair[0]]=np.divide(np.multiply(xj+delxj,1-xj),1-xj-delxj)
# self.setTPX(self.temperature+self.temperature*temp_del,
# self.pressure+self.pressure*pres_del,
# {spec_pair[0]:self.conditions[spec_pair[0]]*spec_pair[1]})
else:
self.temperatures=np.array(self.temperatures)+temp_del*np.array(self.temperatures)
#self.pressures=np.array(self.pressure)+pres_del*np.array(self.pressure)
self.pressure=self.pressure+pres_del*self.pressure
#self.residence_time=self.residence_time+res_del*self.residence_time
# self.setTPX(self.temperature+self.temperature*temp_del,
# self.pressure+self.pressure*pres_del)
data,trash = self.run(ksens_marker=0,psens_marker=1) #Ignore trash, just temp storage for empty kinetic sens array
#print(data)
#data = sim.Simulation.sensitivity_adjustment(self,temp_del,pres_del,spec_pair)
self.temperatures=temptemp
        self.pressure=temppres
self.conditions=tempcond
self.kineticSens = kin_temp
return data
def species_adjustment(self,spec_del:float=0.0):
inert_species=['Ar','AR','HE','He','Kr','KR',
'Xe','XE','NE','Ne']
'''
Creates tuples of specie that need to be perturbed and the
percent value by which to perturb its mole fraction
'''
# gets the mole fraction and the species which are going to be
#perturbed in order to run a sensitivity calculation
data = []
for x in self.conditions.keys():
if x not in inert_species:
data.append(self.sensitivity_adjustment(spec_pair=(x,spec_del)))
return data
def importExperimentalData(self,csvFileList):
        print('Importing flow reactor data from the following csv files...')
print(csvFileList)
experimentalData = [
|
pd.read_csv(csv)
|
pandas.read_csv
|
import numpy as np
import io
import os
import psycopg2
import flask
from flask import Flask, request, jsonify, render_template, Response
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
import snscrape.modules.twitter as sntwitter
import pandas as pd
from nltk.tokenize import TweetTokenizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from tqdm import tqdm
import string
import re as re
from bokeh.plotting import figure, output_file, show
from bokeh.embed import components
import base64
#Initializing the application name [here, the name is app]
app = Flask(__name__)
DATABASE_URL = 'postgresql://yhvwlkefgakryo:64548ee24c94aa91c69a7360e787dce102b33cf0a69a1c5aaa984831f72fbe39@ec2-54-166-37-125.compute-1.amazonaws.com:5432/d7sq0s42rmtm2j'
#Loading the model created in model.py
#model = pickle.load(open('model.pkl', 'rb'))
#Starting the app by rendering the index.html page
@app.route('/')
def home():
return render_template('index.html')
@app.route('/plot',methods=['GET'])
def plot_png():
    con = psycopg2.connect(DATABASE_URL)
cur = con.cursor()
query = f"""SELECT * FROM DATABASE"""
results = pd.read_sql(query, con)
fig = Figure()
k = pd.read_csv("Twitter_stock_final_dataset.csv")
k["Date"] =
|
pd.to_datetime(k[['Day','Month','Year']])
|
pandas.to_datetime
|
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
def read_ephys_info_from_excel_2017(excel_file, skiprows_animal=1, skiprows_cell=5):
# read Ex and In solutions from the first two lines
excelname = os.path.basename(excel_file)
excelname = os.path.splitext(excelname)[0]
animal_info = pd.read_excel(excel_file, header=0, skiprows=skiprows_animal)[:1]
animal_info = animal_info[[x for x in animal_info.columns if not 'Unnamed' in x]]
animal_info.columns = animal_info.columns.str.strip()
animal_info['excelname'] = excelname
metadata =
|
pd.read_excel(excel_file, skiprows=skiprows_cell)
|
pandas.read_excel
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2020/11/8 17:28
Desc: Sina Finance - Bonds - Shanghai/Shenzhen bonds - real-time and historical quote data
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
"""
import datetime
import re
from mssdk.utils import demjson
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.bond.cons import (
zh_sina_bond_hs_count_url,
zh_sina_bond_hs_payload,
zh_sina_bond_hs_url,
zh_sina_bond_hs_hist_url,
)
from mssdk.stock.cons import hk_js_decode
def get_zh_bond_hs_page_count() -> int:
"""
    Total number of pages of Shanghai/Shenzhen bonds on the quote-center home page
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
    :return: total number of pages
:rtype: int
"""
params = {
"node": "hs_z",
}
res = requests.get(zh_sina_bond_hs_count_url, params=params)
    page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
    # 80 bonds are listed per page, so round the page count up to a whole number
    if page_count == int(page_count):
        return int(page_count)
    else:
        return int(page_count) + 1
def bond_zh_hs_spot() -> pd.DataFrame:
"""
    Sina Finance - Bonds - Shanghai/Shenzhen bonds - real-time quote data; heavy scraping can easily get the IP blocked
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
    :return: real-time quote data for all Shanghai/Shenzhen bonds at the current moment
:rtype: pandas.DataFrame
"""
big_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from typing import Optional
import random
import pandas as pd
import numpy as np
class replayHistory():
def __init__(self, staffNumber: int, staffCapcity: int, incrementStep: float, incrementQuitBefore: int, incrementQuitAfter: int, incrementQuitTotal: int, duration: int, incrementLabel: Optional[str]=None):
self.incrementTracker = 0
self.incrementLabel = incrementLabel
self.incrementStep = incrementStep
self.staffNumber = staffNumber
self.staffCapcity = staffCapcity
self.incrementQuitBefore = incrementQuitBefore
self.incrementQuitAfter = incrementQuitAfter
self.incrementQuitTotal = incrementQuitTotal
self.duration = duration
self.requiredInputDataFrameColumns = ['actualDuration', 'availableIncrementStep', 'rank']
self.staffQueue = {i: {'availableQueue': self.staffCapcity} for i in range(1, self.staffNumber + 1)}
self.staffQueueColumnNames = ['staff', 'incrementStep', 'availableQueue']
def _replaceIncrementLabel(self):
if self.incrementLabel:
self.dfProcess.columns = self.dfProcess.columns.str.replace('incrementStep', self.incrementLabel.lower()).str.replace('IncrementStep', self.incrementLabel.title())
self.dfStaffQueue.columns = self.dfStaffQueue.columns.str.replace('incrementStep', self.incrementLabel.lower()).str.replace('IncrementStep', self.incrementLabel.title())
def _checkInputDataFrame(self) -> bool:
if not all([column in self.dfProcess.columns and pd.api.types.is_numeric_dtype(self.dfProcess[column]) for column in self.requiredInputDataFrameColumns]):
print('The columns {} are all required in the input dataframe and must be numeric type'.format(self.requiredInputDataFrameColumns))
return False
return True
def _generateCommonConditions(self) -> (pd.Series, pd.Series, pd.Series, pd.Series):
hasNotComplete = self.dfProcess['completeIncrementStep'].isnull()
hasNotQuitCondition = self.dfProcess['quitIncrementStep'].isnull()
hasNotStartedCondition = self.dfProcess['startIncrementStep'].isnull()
isAvailableCondition = self.dfProcess['availableIncrementStep'] <= self.incrementTracker
return hasNotComplete, hasNotQuitCondition, hasNotStartedCondition, isAvailableCondition
def _updateTracking(self, df: pd.DataFrame, currentQueueAdd: bool):
self.dfProcess.update(df)
for staff, count in df['staff'].value_counts().iteritems():
self.staffQueue[staff]['availableQueue'] -= count if currentQueueAdd else count * -1
def _trackIncrementStartingQueue(self):
data=[[staff, self.incrementTracker, queue['availableQueue']] for staff, queue in self.staffQueue.items()]
dfIncrementStart = pd.DataFrame(data=data, columns=self.staffQueueColumnNames)
self.dfStaffQueue = pd.concat([self.dfStaffQueue, dfIncrementStart])
def _quit(self):
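        # A queued item is marked as quit when any of the three rules below holds:
        # it waited too long before being started, too long after starting, or too
        # long in total since it first became available.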
hasNotComplete, hasNotQuitCondition, hasNotStartedCondition, isAvailableCondition = self._generateCommonConditions()
daysSinceAvailable = self.incrementTracker - self.dfProcess['availableIncrementStep']
daysAfterStart = self.incrementTracker - self.dfProcess['startIncrementStep']
exceedWaitBeforeStartCondition = daysSinceAvailable >= self.incrementQuitBefore
exceedWaitAfterStartCondition = daysAfterStart >= self.incrementQuitAfter
exceedWaitTotalCondition = daysSinceAvailable >= self.incrementQuitTotal
quitBeforeStartCondition = hasNotComplete & hasNotQuitCondition & hasNotStartedCondition & exceedWaitBeforeStartCondition
quitAfterStartCondition = hasNotComplete & hasNotQuitCondition & ~hasNotStartedCondition & exceedWaitAfterStartCondition
quitTotalTimeCondition = hasNotComplete & hasNotQuitCondition & exceedWaitTotalCondition
dfQuit = self.dfProcess[quitBeforeStartCondition | quitAfterStartCondition | quitTotalTimeCondition].copy()
dfQuit['quitIncrementStep'] = self.incrementTracker
self._updateTracking(df=dfQuit, currentQueueAdd=False) if not dfQuit.empty else None
def _complete(self):
hasNotComplete, hasNotQuitCondition, hasNotStartedCondition, isAvailableCondition = self._generateCommonConditions()
durationCompletedCondition = (self.incrementTracker - self.dfProcess['startIncrementStep']) >= self.dfProcess['actualDuration']
dfComplete = self.dfProcess[hasNotComplete & hasNotQuitCondition & durationCompletedCondition].copy()
dfComplete['completeIncrementStep'] = self.incrementTracker
self._updateTracking(df=dfComplete, currentQueueAdd=False) if not dfComplete.empty else None
def _select(self):
hasNotComplete, hasNotQuitCondition, hasNotStartedCondition, isAvailableCondition = self._generateCommonConditions()
staffAvailability = [staff for staff, queueInformation in self.staffQueue.items() for x in range(queueInformation['availableQueue'])]
random.shuffle(staffAvailability)
dfTotalPickup = self.dfProcess[hasNotQuitCondition & hasNotStartedCondition & isAvailableCondition].sort_values('rank').iloc[0:len(staffAvailability)].copy()
staffAssignment = staffAvailability[0:len(dfTotalPickup)]
dfTotalPickup['startIncrementStep'] = self.incrementTracker
dfTotalPickup['staff'] = staffAssignment
self._updateTracking(df=dfTotalPickup, currentQueueAdd=True) if not dfTotalPickup.empty else None
def run(self, dfQueue: pd.DataFrame, saveResults: Optional[bool] = False):
self.dfProcess = dfQueue.copy()
self.dfProcess[['startIncrementStep', 'completeIncrementStep', 'quitIncrementStep', 'staff']] = np.nan
self.dfStaffQueue =
|
pd.DataFrame(columns=self.staffQueueColumnNames)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import requests
import json
import os
class ScopusModified(object):
def __init__(self, apikey=None):
self.apikey = apikey
def _parse_author(self, entry):
#print(entry)
author_id = entry['dc:identifier'].split(':')[-1]
lastname = entry['preferred-name']['surname']
firstname = entry['preferred-name']['given-name']
doc_count = int(entry['document-count'])
# affiliations
if 'affiliation-current' in entry:
affil = entry['affiliation-current']
try:
institution_name = affil['affiliation-name']
except:
institution_name = None
try:
institution_id = affil['affiliation-id']
except:
institution_id = None
else:
institution_name = None
institution_id = None
#city = affil.find('affiliation-city').text
#country = affil.find('affiliation-country').text
#affiliation = institution + ', ' + city + ', ' + country
return pd.Series({'author_id': author_id, 'name': firstname + ' ' + lastname, 'document_count': doc_count,\
'affiliation': institution_name, 'affiliation_id': institution_id})
def _parse_article(self, entry):
try:
scopus_id = entry['dc:identifier'].split(':')[-1]
except:
scopus_id = None
try:
title = entry['dc:title']
except:
title = None
try:
publicationname = entry['prism:publicationName']
except:
publicationname = None
try:
issn = entry['prism:issn']
except:
issn = None
try:
isbn = entry['prism:isbn']
except:
isbn = None
try:
eissn = entry['prism:eIssn']
except:
eissn = None
try:
volume = entry['prism:volume']
except:
volume = None
try:
pagerange = entry['prism:pageRange']
except:
pagerange = None
try:
coverdate = entry['prism:coverDate']
except:
coverdate = None
try:
doi = entry['prism:doi']
except:
doi = None
try:
citationcount = int(entry['citedby-count'])
except:
citationcount = None
try:
affiliation = _parse_affiliation(entry['affiliation'])
except:
affiliation = None
try:
aggregationtype = entry['prism:aggregationType']
except:
aggregationtype = None
try:
sub_dc = entry['subtypeDescription']
except:
sub_dc = None
try:
author_entry = entry['author']
author_id_list = [auth_entry['authid'] for auth_entry in author_entry]
except:
author_id_list = list()
try:
link_list = entry['link']
full_text_link = None
for link in link_list:
if link['@ref'] == 'full-text':
full_text_link = link['@href']
except:
full_text_link = None
return pd.Series({'scopus_id': scopus_id, 'title': title, 'publication_name':publicationname,\
'issn': issn, 'isbn': isbn, 'eissn': eissn, 'volume': volume, 'page_range': pagerange,\
'cover_date': coverdate, 'doi': doi,'citation_count': citationcount, 'affiliation': affiliation,\
'aggregation_type': aggregationtype, 'subtype_description': sub_dc, 'authors': author_id_list,\
'full_text': full_text_link})
def _parse_entry(self, entry, type_):
if type_ == 1 or type_ == 'article':
return self._parse_article(entry)
else:
return self._parse_author(entry)
def _search_scopus(self, key, query, type_, view, index=0):
par = {'query': query, 'start': index,
'httpAccept': 'application/json', 'view': view}
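        # The institutional token (if any) is read from the INSTTOKEN environment
        # variable and sent alongside the API key via Elsevier's X-ELS-* headers.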
insttoken = os.environ.get('INSTTOKEN')
headers = {'X-ELS-Insttoken': insttoken, 'X-ELS-APIKey': key}
if type_ == 'article' or type_ == 1:
r = requests.get("https://api.elsevier.com/content/search/scopus", params=par, headers=headers)
else:
par['view'] = 'STANDARD'
r = requests.get("https://api.elsevier.com/content/search/author", params=par, headers=headers)
js = r.json()
#print(r.url)
total_count = int(js['search-results']['opensearch:totalResults'])
entries = js['search-results']['entry']
result_df = pd.DataFrame([self._parse_entry(entry, type_) for entry in entries])
if index == 0:
return(result_df, total_count)
else:
return(result_df)
def search(self, query, count=100, type_=1, view='COMPLETE'):
if type(count) is not int:
raise ValueError("%s is not a valid input for the number of entries to return." %number)
result_df, total_count = self._search_scopus(self.apikey, query, type_, view)
if total_count <= count:
count = total_count
if count <= 25:
# if less than 25, just one page of response is enough
return result_df[:count]
# if larger than, go to next few pages until enough
i = 1
while True:
index = 25*i
result_df = result_df.append(self._search_scopus(self.apikey, query, type_, view=view, index=index),
ignore_index=True)
if result_df.shape[0] >= count:
return result_df[:count]
i += 1
def parse_citation(self, js_citation, year_range):
resp = js_citation['abstract-citations-response']
cite_info_list = resp['citeInfoMatrix']['citeInfoMatrixXML']['citationMatrix']['citeInfo']
year_range = (year_range[0], year_range[1]+1)
columns = ['scopus_id', 'previous_citation'] + [str(yr) for yr in range(*year_range)] + ['later_citation', 'total_citation', 'range_citation']
citation_df = pd.DataFrame(columns=columns)
year_arr = np.arange(year_range[0], year_range[1]+1)
for cite_info in cite_info_list:
cite_dict = {}
# dc:identifier: scopus id
cite_dict['scopus_id'] = cite_info['dc:identifier'].split(':')[-1]
# pcc: previous citation counts
try:
cite_dict['previous_citation'] = cite_info['pcc']
except:
                cite_dict['previous_citation'] = np.nan
# cc: citation counts during year range
try:
cc = cite_info['cc']
except:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
##############
# Data reading
##############
def original_data(thread_fp, nrows=None):
"""Read raw Twitter data"""
print("Loading threads from: %s" % thread_fp)
# reading in data
thread_df = pd.read_csv(thread_fp, nrows=nrows)
# converting id fields from string to int
thread_df['id_str'] = thread_df['id_str'].astype(int)
thread_df['usr_id_str'] = thread_df['usr_id_str'].astype(int)
if 'conversation_id' in thread_df.columns:
thread_df['conversation_id'] = thread_df['conversation_id'].astype(int)
# filtering the dataframe to only contain english language tweets
if 'lang' in thread_df.columns:
thread_df = thread_df[thread_df["lang"] == "en"]
if 'is_conv_seed' in thread_df.columns:
seed_df = thread_df[thread_df["is_conv_seed"]]
else:
seed_df = None
return thread_df, seed_df
def labeled_data(label_fp):
"""Read (manually-labeled) vaxxer annotations from file"""
print("Loading labels from: %s" % label_fp)
# reading in labeled data
label_df = pd.read_csv(label_fp, names=["timestamp", "id_str", "usr_id_str", "is_conv_seed", "label", "label_id"])
label_df = label_df.drop_duplicates(subset="id_str", keep="last")
label_df['id_str'] = label_df['id_str'].astype(int)
return label_df
#####################
# Get the clean data
#####################
def temporal_train_test_split(labeled_df, train_ratio=0.6, time_col="epoch", verbose=True):
"""Temporal train-test split for labeled tweets"""
labeled_seeds_df = labeled_df.sort_values(time_col, ascending=True).copy()
size = len(labeled_seeds_df)
train_size = int(size*train_ratio)
test_size = size-train_size
if verbose:
print("Train size:", train_size)
print("Test size:", test_size)
train_df = labeled_seeds_df.head(train_size)
test_df = labeled_seeds_df.tail(test_size)
if verbose:
print("Train label distribution:")
print(train_df["label"].value_counts().sort_index() / len(train_df))
print("Test label distribution:")
print(test_df["label"].value_counts().sort_index() / len(test_df))
return train_df, test_df
def clean_labeled_data(seed_df, drop_irrelevant=False, verbose=True):
"""Exclude low-frequency or invalid vaxxer categories"""
# "Don't know" and "Pro-choice" are excluded!
if drop_irrelevant:
seed_df = seed_df[seed_df["label"] != "Irrelevant"]
seed_df = seed_df[seed_df["label"] != "Don't know"]
seed_df = seed_df[seed_df["label"] != "Pro-choice"]
labeled_df = seed_df[~seed_df["label"].isnull()]
if verbose:
print("Number of labeled records after cleaning:", len(labeled_df))
unlabeled_df = seed_df[seed_df["label"].isnull()]
return labeled_df, unlabeled_df
class LabelIndexer():
def __init__(self):
self._options = ["Pro-vaxxer", "Irrelevant"]
# "Don't know" and "Pro-choice" are excluded!
self._other = "Vax-skeptic"
@property
def labels(self):
labels = self._options + [self._other]
return labels.copy()
@property
def num_categories(self):
return len(self._options) + 1
def label2idx(self, label):
if label in self._options:
return self._options.index(label)
else:
return len(self._options)
def idx2label(self, index):
if index < len(self._options):
return self._options[index]
else:
return self._other
def get_index_mapping(self):
return {idx:self.idx2label(idx) for idx in range(self.num_categories)}
def text_and_label(df):
"""Encode text labels to numerical values"""
indexer = LabelIndexer()
texts = df["full_text"].values
labels = [indexer.label2idx(label) for label in df.label.values]
return texts, labels
def get_train_test_data(thread_fp, label_fp, train_ratio=0.6, meta_cols=["id_str","usr_id_str"], drop_irrelevant=False, visualize=True, verbose=True):
"""Load data with temporal train-test split"""
# reading in data
thread_df, seed_df = original_data(thread_fp)
label_df = labeled_data(label_fp)
if verbose:
print("Number of labeled records:", len(label_df))
# joining the seed dataframe with the labels by id_str
seed_df = pd.merge(seed_df, label_df.drop(['usr_id_str', 'is_conv_seed'], axis=1), on="id_str", how="left")
labeled_df, unlabeled_df = clean_labeled_data(seed_df, drop_irrelevant, verbose)
if visualize:
fig, ax = plt.subplots(1,1,figsize=(15,6))
plt.title("Number of daily labeled tweets")
labeled_df["date"].value_counts().sort_index().plot(ax=ax)
train_df, test_df = temporal_train_test_split(labeled_df, train_ratio, verbose=verbose)
tr_text, tr_label = text_and_label(train_df)
te_text, te_label = text_and_label(test_df)
tr_meta, te_meta = train_df[meta_cols], test_df[meta_cols]
return tr_text, tr_label, tr_meta, te_text, te_label, te_meta, unlabeled_df
################
# Classification
################
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, roc_auc_score
def metrics2df(metrics_dict):
records = []
for model in metrics_dict:
for part in metrics_dict[model]:
for metric in metrics_dict[model][part]:
score = metrics_dict[model][part][metric]
records.append([model, part, metric, score])
return pd.DataFrame(records, columns=["model","part","metric","score"])
def calculate_metrics(model, X, y, multiclass=False, show_confusion_matrix=False, verbose=True):
y_pred = model.predict(X)
y_proba = model.predict_proba(X)
if show_confusion_matrix:
cm = confusion_matrix(y, y_pred)
print(model)
print(cm)
acc = accuracy_score(y, y_pred)
f1 = f1_score(y, y_pred, average='macro' if multiclass else 'binary')
if verbose:
print("Accuracy:", acc)
print("F-score (macro):", f1)
auc = None
if not multiclass:
auc = roc_auc_score(y, y_proba[:, 1])
if verbose:
print("Roc Auc score:", auc)
metrics = {
"acc":acc,
"f1":f1,
"auc":auc,
}
return metrics, y_pred, y_proba
def evaluate_classifier(model, data, multiclass=False, show_confusion_matrix=False, verbose=True):
X_train, X_test, y_train, y_test = data
model = model.fit(X_train, y_train)
results = {}
if verbose:
print(model)
print("TRAIN:")
results["train"] = calculate_metrics(model, X_train, y_train, multiclass, show_confusion_matrix, verbose)[0]
if verbose:
print("TEST:")
results["test"] = calculate_metrics(model, X_test, y_test, multiclass, show_confusion_matrix, verbose)[0]
if verbose:
print()
return results
def evaluate_classifiers(model_tuples, vectors, labels, vectors_test=None, labels_test=None, test_size=0.3, multiclass=False, show_confusion_matrix=False, verbose=True):
names, classifiers = zip(*model_tuples)
if vectors_test is None:
train_test_data = train_test_split(vectors, labels, random_state=0, test_size=test_size)
else:
train_test_data = (vectors, vectors_test, labels, labels_test)
results = {}
for i in range(len(classifiers)):
results[names[i]] = evaluate_classifier(classifiers[i], train_test_data, multiclass, show_confusion_matrix, verbose)
metrics_df = metrics2df(results)
metrics_df["dimension"] = vectors.shape[1]
return metrics_df
from collections import deque
def dynamic_auc(df, window=7*86400):
"""Calculate AUC over time with the pre-defined time window."""
q = deque([])
last_date = None
metrics = []
for _, row in df.sort_values("epoch").iterrows():
if last_date == None:
last_date = row["date"]
record = (row["epoch"], row["label"], row["proba"])
if row["date"] != last_date:
current_time = record[0]
while current_time - q[0][0] > window:
q.popleft()
arr = np.array(q)
if len(arr) > 1:
auc = roc_auc_score(arr[:,1], arr[:,2])
metrics.append((last_date, auc))
last_date = row["date"]
q.append(record)
return pd.DataFrame(metrics, columns=["date","score"])
###############
# Visualization
###############
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def show_dynamic_auc(configs, predictions, badrate, window=7*86400, markers=["circle","square","star","triangle-up"]):
"""Show AUC over time with the pre-defined time window."""
parts = []
fig = make_subplots(specs=[[{"secondary_y": True}]])
for idx, key in enumerate(configs):
if idx == 3:
continue
tmp_auc = dynamic_auc(predictions[idx], window)
tmp_auc["experiment_id"] = key
parts.append(tmp_auc)
fig.add_trace(
go.Scatter(
x=tmp_auc["date"],
y=tmp_auc["score"],
name=key,
mode='lines+markers',
marker_symbol=markers[idx]
),
row=1, col=1, secondary_y=False)
metrics_df = pd.concat(parts)
dates = list(metrics_df["date"].unique())
fig.add_trace(
go.Scatter(x=dates, y=badrate[dates], name="Vax-skeptic rate", line=dict(dash="dot")),
row=1, col=1, secondary_y=True)
fig.update_layout(
yaxis_title="AUC",
font_size=18,
paper_bgcolor='white',
plot_bgcolor='white',
)
fig.update_yaxes(title_text="Vax-skeptic rate", secondary_y=True)
return fig
################
# Model stacking
################
def calculate_sequential_stats(df, id_col, user_col, value_cols=None, aggregations=["mean"]):
"""
Aggregate user statistics sequentially. The input dataframe 'df' must be pre-ordered!
"""
if value_cols == None:
value_cols = list(df.columns)
value_cols.remove(id_col)
value_cols.remove(user_col)
user_records = {}
agg_records = {agg_type:[] for agg_type in aggregations}
for idx in tqdm(range(len(df)), mininterval=60):
row = df.iloc[idx]
id_, user_, values_ = str(row[id_col]), str(row[user_col]), list(row[value_cols])
#print(user_)
if len(user_records.get(user_,[])) > 0:
user_history = pd.DataFrame(user_records[user_], columns=value_cols)
for agg_type in aggregations:
agg_records[agg_type].append(list(user_history.agg(agg_type)))
else:
for agg_type in aggregations:
agg_records[agg_type].append([None] * len(value_cols))
if pd.isnull(values_).sum() == 0:
if not user_ in user_records:
user_records[user_] = []
user_records[user_].append(values_)
agg_df = df.copy()
for agg_type in aggregations:
tmp_df =
|
pd.DataFrame(agg_records[agg_type], index=agg_df.index, columns=[agg_type+"_"+col for col in value_cols])
|
pandas.DataFrame
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
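# keys() should list every stored object with a leading "/"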
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
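# smoke test: repr/str/info should work for stores and for individual storers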
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
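# exercise open-mode semantics for the constructor, the context manager, to_hdf and read_hdf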
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
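# basic put() semantics: put() with append=True is rejected; a table can be overwritten with append=False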
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
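# store with an explicit ascii encoding and select using an encoded Term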
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
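# removal semantics: remove by key, remove a group path with children, KeyError for missing keys, and __delitem__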
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
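        # assigning a new object to an existing key replaces the stored node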
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
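        # where clauses may reference Python lists, ranges, and Index objects
        # as large selectors against data columns and the index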
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
            # big selector along a data column (users)
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
            # select w/iterator and where clause that selects nothing (beyond end of range)
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
            # the where clause restricts selection to the first chunk, so a single frame is returned
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
            # Consistent with Python idiom, a selection that matches nothing
            # should return an empty list (iterating over [] yields nothing).
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
            # repeating the select with the same coordinates gives the same result
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
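        # append_to_multiple splits a frame's columns across several tables,
        # with one table acting as the row selector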
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
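        # select_as_multiple applies the where clause to the selector table and
        # concatenates the matching rows from each listed table along the columns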
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
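        # start/stop slice the rows of a table store; out-of-range slices yield empty results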
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
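        # column filters built from slices of df.columns should select exactly those columns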
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
        result = tm.round_trip_localpath(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
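        # write obj to a fresh store (fixed format), read it back, and compare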
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
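        # write, read back, write the retrieved object again, and compare both reads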
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
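        # same as _check_roundtrip, but writes with format="table"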
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
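                # copy the store at f into new_f and verify keys, row counts,
                # and (optionally) propagated indexes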
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
                            # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
def test_unicode_index(self, setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(self, setup_path):
# GH 11234
char = "\u0394"
df = pd.DataFrame({"A": [char]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
df = pd.DataFrame({"A": ["a", char], "B": ["b", "b"]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
@td.xfail_non_writeable
def test_store_datetime_mixed(self, setup_path):
df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
ts = tm.makeTimeSeries()
df["d"] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
# FIXME: don't leave commented-out code
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
#
# with pytest.raises(Exception):
# store.put('foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self, setup_path):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({"a": np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({("1", 2): np.random.randn(10)})
df5 = DataFrame({("1", 2, object): np.random.randn(10)})
with ensure_clean_store(setup_path) as store:
name = "df_{}".format(tm.rands(10))
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self, setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
"b": [1, 2, 3, 4, 5, 6, 7, 8],
}
)
expected = df[df.a == "test & test"]
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self, setup_path):
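        # categoricals should round-trip through table format, including ordered
        # categories, appends, and where-clause queries on category values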
with ensure_clean_store(setup_path) as store:
# Basic
_maybe_remove(store, "s")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s", s, format="table")
result = store.select("s")
tm.assert_series_equal(s, result)
_maybe_remove(store, "s_ordered")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
)
store.append("s_ordered", s, format="table")
result = store.select("s_ordered")
tm.assert_series_equal(s, result)
_maybe_remove(store, "df")
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append("df", df, format="table")
result = store.select("df")
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, "si")
s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category")
store.append("si", s)
result = store.select("si")
tm.assert_series_equal(result, s)
_maybe_remove(store, "si2")
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category")
store.append("si2", s)
result = store.select("si2")
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, "df2")
df2 = df.copy()
df2["s2"] = Series(list("abcdefg")).astype("category")
store.append("df2", df2)
result = store.select("df2")
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert "/df2 " in info
# assert '/df2/meta/values_block_0/meta' in info
assert "/df2/meta/values_block_1/meta" in info
# unordered
_maybe_remove(store, "s2")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s2", s, format="table")
result = store.select("s2")
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, "df3")
store.append("df3", df, data_columns=["s"])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["d"])]
result = store.select("df3", where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["f"])]
result = store.select("df3", where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append("df3", df)
df = concat([df, df])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3["s"].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append("df3", df3)
            # Remove, and make sure the metadata is removed as well (removal is
            # recursive, so it should be).
result = store.select("df3/meta/s/meta")
assert result is not None
store.remove("df3")
with pytest.raises(
KeyError, match="'No object named df3/meta/s/meta in the file'"
):
store.select("df3/meta/s/meta")
def test_categorical_conversion(self, setup_path):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# where criteria isn't met.
obsids = ["ESP_012345_6789", "ESP_987654_3210"]
imgids = ["APF00006np", "APF0001imm"]
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype("category")
df.imgids = df.imgids.astype("category")
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self, setup_path):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
df = pd.DataFrame(
{
"a": ["a", "b", "c", np.nan],
"b": [np.nan, np.nan, np.nan, np.nan],
"c": [1, 2, 3, 4],
"d": pd.Series([None] * 4, dtype=object),
}
)
df["a"] = df.a.astype("category")
df["b"] = df.b.astype("category")
df["d"] = df.b.astype("category")
expected = df
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df")
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self, setup_path):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", format="fixed")
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self, setup_path):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self, setup_path):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example")
with ensure_clean_store(setup_path) as store:
store["df"] = df
tm.assert_frame_equal(store["df"], df)
def test_columns_multiindex_modified(self, setup_path):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(setup_path) as path:
df.to_hdf(
path,
"df",
mode="a",
append=True,
data_columns=data_columns,
index=False,
)
cols2load = list("BCD")
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, "df", columns=cols2load) # noqa
assert cols2load_original == cols2load
@ignore_natural_naming_warning
def test_to_hdf_with_object_column_names(self, setup_path):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [
tm.makeIntIndex,
tm.makeFloatIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]
types_should_run = [
tm.makeStringIndex,
tm.makeCategoricalIndex,
tm.makeUnicodeIndex,
]
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
msg = "cannot have non-object label DataIndexableCol"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", format="table", data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df.to_hdf(path, "df", format="table", data_columns=True)
result = pd.read_hdf(
path, "df", where="index = [{0}]".format(df.index[0])
)
assert len(result)
def test_read_hdf_open_store(self, setup_path):
# GH10330
        # No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
indirect = read_hdf(store, "df")
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with ensure_clean_path(setup_path) as path:
with pytest.raises(IOError):
read_hdf(path, "key")
df.to_hdf(path, "df")
store = HDFStore(path, mode="r")
store.close()
with pytest.raises(IOError):
read_hdf(store, "df")
def test_read_hdf_generic_buffer_errors(self):
with pytest.raises(NotImplementedError):
read_hdf(BytesIO(b""), "df")
def test_invalid_complib(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", complib="foolib")
# GH10443
def test_read_nokey(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a")
with pytest.raises(ValueError):
read_hdf(path)
def test_read_nokey_table(self, setup_path):
# GH13231
df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a", format="table")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a", format="table")
with pytest.raises(ValueError):
read_hdf(path)
def test_read_nokey_empty(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path)
store.close()
with pytest.raises(ValueError):
read_hdf(path)
@td.skip_if_no("pathlib")
def test_read_from_pathlib_path(self, setup_path):
# GH11773
from pathlib import Path
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
def test_read_from_py_localpath(self, setup_path):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self, setup_path):
# GH 14241
df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
cutoff = 1000000000.0006
result = store.select("test", "A < {cutoff:.4f}".format(cutoff=cutoff))
assert result.empty
cutoff = 1000000000.0010
result = store.select("test", "A > {cutoff:.4f}".format(cutoff=cutoff))
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select("test", "A == {exact:.4f}".format(exact=exact))
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self, setup_path):
# GH 15492
df = pd.DataFrame(
{
"date": ["2014-01-01", "2014-01-02"],
"real_date": date_range("2014-01-01", periods=2),
"float": [1.1, 1.2],
"int": [1, 2],
},
columns=["date", "real_date", "float", "int"],
)
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
ts = pd.Timestamp("2014-01-01") # noqa
result = store.select("test", where="real_date > ts")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ["<", ">", "=="]:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp("2014-01-01"), pd.Timedelta(1, "s")]:
query = "date {op} v".format(op=op)
with pytest.raises(TypeError):
store.select("test", where=query)
# strings to other columns must be convertible to type
v = "a"
for col in ["int", "float", "real_date"]:
query = "{col} {op} v".format(op=op, col=col)
with pytest.raises(ValueError):
store.select("test", where=query)
for v, col in zip(
["1", "1.1", "2014-01-01"], ["int", "float", "real_date"]
):
query = "{col} {op} v".format(op=op, col=col)
result = store.select("test", where=query)
if op == "==":
expected = df.loc[[0], :]
elif op == ">":
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_read_hdf_series_mode_r(self, format, setup_path):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with
|
ensure_clean_path(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_path
|
import datetime
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import pytest
from visions.application.summaries import CompleteSummary
from visions.types import (
URL,
Boolean,
Categorical,
Complex,
DateTime,
Float,
Geometry,
Integer,
Object,
String,
)
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
if isinstance(trial_output[metric], pd.Series):
trial_output[metric] = trial_output[metric].to_dict()
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=Integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_integer_missing_summary(summary, visions_type=Integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
"na_count": 0,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_float_missing_summary(summary, visions_type=Float):
test_series = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, np.nan])
correct_output = {
"n_unique": 5,
"median": 2,
"mean": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 6,
"n_zeros": 1,
"na_count": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_bool_missing_summary(summary, visions_type=Boolean):
test_series =
|
pd.Series([True, False, True, True, np.nan])
|
pandas.Series
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import ALL
import flask
from flask import render_template_string
import glob
import os
import pickle
image_directory = 'static/'
list_of_images = [os.path.basename(x) for x in glob.glob(
'{}*.mp4'.format(image_directory))]
# static_image_route = 'static/gifs/'
# print(len(list_of_images))
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
from PIL import Image
from io import StringIO
WIDTH = 200
HEIGHT = 120
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# app = dash.Dash()
import pandas as pd
from epic_kitchens import meta
from flask import Flask, request, render_template, send_from_directory
def pd_to_dict(df, entity='verbs'):
x_coarse_dict = {}
for index, row in df.iterrows():
x_list = row[entity]
x_class = row['class_key']
for i in x_list:
x_coarse_dict[i] = x_class
return x_coarse_dict
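# A minimal usage sketch for pd_to_dict, assuming a toy frame shaped like the
# EPIC-Kitchens class tables (columns 'class_key' and 'verbs'); the sample rows
# below are illustrative assumptions, not the real meta.verb_classes() output.
def _example_pd_to_dict():
    toy = pd.DataFrame({
        "class_key": ["open", "take"],
        "verbs": [["open", "unlock"], ["take", "grab", "get"]],
    })
    # Each fine-grained verb is mapped back to its coarse class key,
    # e.g. {'open': 'open', 'unlock': 'open', 'take': 'take', 'grab': 'take', ...}
    return pd_to_dict(toy, entity="verbs")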
# available_indicators = df['Indicator Name'].unique()
df_noun = meta.noun_classes()
df_verb = meta.verb_classes()
# include None as an action
df2_noun = pd.DataFrame({"class_key": ['None'], "nouns": [['None']]})
df_noun = df_noun.append(df2_noun)
df2_verb = pd.DataFrame({"class_key": ['None'], "verbs": [['None']]})
df_verb = df_verb.append(df2_verb)
noun_coarse_dict = pd_to_dict(df_noun, entity='nouns')
verb_coarse_dict = pd_to_dict(df_verb, entity='verbs')
# available verbs
verb_dict = {'Verbs': ['open', 'take', 'get']}
available_entities_verb =
|
pd.DataFrame.from_dict(verb_dict)
|
pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 11:40:08 2020
@author: <NAME>
"""
#%% Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from scipy import stats
from sklearn.model_selection import cross_val_score
#%% Import data
# import data from csv file
filename = r"C:\Users\user\Documents\Google Drive\Business\Python\Job_MultRegression\Placement_Data_Full_Class.csv"
df = pd.read_csv(filename)
# initial EDA
print(df.head(10))
print(df.shape)
print(df.dtypes)
print(df.describe())
print(df.isna().sum())
#%% Data cleaning and preprocessing
# drop individuals not currently working
data = df.dropna(subset=['salary'])
# drop secondary education and non-relevant information
data.drop(columns=['sl_no', 'ssc_b', 'hsc_b', 'hsc_s', 'status'], inplace=True)
# final EDA
print(data.head(10))
print(data.shape)
print(data.dtypes)
print(data.describe())
print(data.isna().sum())
# reset index of final data
data.reset_index(inplace=True, drop = True)
# get dummy variables for categorical data
data = pd.get_dummies(data, drop_first=True)
# remove outliers
z_scores = stats.zscore(data)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 5).all(axis=1)
data = data[filtered_entries]
# split of data into train and test
X = data.drop(columns=['salary'])
y = data.salary
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
#%% Graphs
# graphical representation of relevant numeric columns
sns.pairplot(data, vars=['degree_p','etest_p','mba_p','salary'])
# salary boxplot
plt.boxplot(data.salary)
plt.show()
#%% Linear regression
# initialise Linear Regression
regressor = LinearRegression()
# fit training data
regressor.fit(X_train, y_train)
# predict test data
y_pred_reg = regressor.predict(X_test)
# determine how good the prediction is
print('Linear Regressor:')
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred_reg))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred_reg))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred_reg)))
print('Error relative to mean:', round(np.sqrt(metrics.mean_squared_error(y_test, y_pred_reg))/y.mean()*100, 2), '%')
print('Score: ', regressor.score(X_test, y_test))
# comparison of test data and predicted data
comparison = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred_reg})
comparison.plot(kind='bar',figsize=(10,8))
plt.title('Linear regression')
plt.xlabel('Person index')
plt.ylabel('Salary')
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
coeff_df =
|
pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient'])
|
pandas.DataFrame
|
from abc import abstractmethod
import pandas as pd
from aistac.components.abstract_component import AbstractComponent
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.commons import Commons
from ds_discovery.components.discovery import DataDiscovery, Visualisation
__author__ = '<NAME>'
class AbstractCommonComponent(AbstractComponent):
DEFAULT_MODULE = 'ds_discovery.handlers.pandas_handlers'
DEFAULT_SOURCE_HANDLER = 'PandasSourceHandler'
DEFAULT_PERSIST_HANDLER = 'PandasPersistHandler'
@classmethod
@abstractmethod
def from_uri(cls, task_name: str, uri_pm_path: str, username: str, uri_pm_repo: str=None, pm_file_type: str=None,
pm_module: str=None, pm_handler: str=None, pm_kwargs: dict=None, default_save=None,
reset_templates: bool=None, template_path: str=None, template_module: str=None,
template_source_handler: str=None, template_persist_handler: str=None, align_connectors: bool=None,
default_save_intent: bool=None, default_intent_level: bool=None, order_next_available: bool=None,
default_replace_intent: bool=None, has_contract: bool=None):
return cls
@classmethod
def discovery_pad(cls) -> DataDiscovery:
""" A class method to use the Components discovery methods as a scratch pad"""
return DataDiscovery()
@classmethod
def visual_pad(cls) -> Visualisation:
""" A class method to use the Components visualisation methods as a scratch pad"""
return Visualisation()
@property
def discover(self) -> DataDiscovery:
"""The components instance"""
return DataDiscovery()
@property
def visual(self) -> Visualisation:
"""The visualisation instance"""
return Visualisation()
def load_source_canonical(self, **kwargs) -> pd.DataFrame:
"""returns the contracted source data as a DataFrame """
return self.load_canonical(self.CONNECTOR_SOURCE, **kwargs)
def load_canonical(self, connector_name: str, **kwargs) -> pd.DataFrame:
"""returns the canonical of the referenced connector
:param connector_name: the name or label to identify and reference the connector
"""
canonical = super().load_canonical(connector_name=connector_name, **kwargs)
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
return canonical
def load_persist_canonical(self, **kwargs) -> pd.DataFrame:
"""loads the clean pandas.DataFrame from the clean folder for this contract"""
return self.load_canonical(self.CONNECTOR_PERSIST, **kwargs)
def save_persist_canonical(self, canonical, auto_connectors: bool=None, **kwargs):
"""Saves the canonical to the clean files folder, auto creating the connector from template if not set"""
if auto_connectors if isinstance(auto_connectors, bool) else True:
if not self.pm.has_connector(self.CONNECTOR_PERSIST):
self.set_persist()
self.persist_canonical(connector_name=self.CONNECTOR_PERSIST, canonical=canonical, **kwargs)
def add_column_description(self, column_name: str, description: str, save: bool=None):
""" adds a description note that is included in with the 'report_column_catalog'"""
if isinstance(description, str) and description:
self.pm.set_intent_description(level=column_name, text=description)
self.pm_persist(save)
return
def setup_bootstrap(self, domain: str=None, project_name: str=None, path: str=None, file_type: str=None,
description: str=None):
""" Creates a bootstrap Transition setup. Note this does not set the source
        :param domain: (optional) The domain this simulator sits within e.g. 'Healthcare' or 'Financial Services'
:param project_name: (optional) a project name that will replace the hadron naming on file prefix
:param path: (optional) a path added to the template path default
:param file_type: (optional) a file_type for the persisted file, default is 'parquet'
:param description: (optional) a description of the component instance to overwrite the default
"""
domain = domain.title() if isinstance(domain, str) else 'Unspecified'
file_type = file_type if isinstance(file_type, str) else 'parquet'
project_name = project_name if isinstance(project_name, str) else 'hadron'
file_name = self.pm.file_pattern(name='dataset', project=project_name.lower(), path=path, file_type=file_type,
versioned=True)
self.set_persist(uri_file=file_name)
component = self.pm.manager_name()
if not isinstance(description, str):
description = f"{domain} domain {component} component for {project_name} {self.pm.task_name} contract"
self.set_description(description=description)
def save_report_canonical(self, reports: [str, list], report_canonical: [dict, pd.DataFrame],
replace_connectors: bool=None, auto_connectors: bool=None, save: bool=None, **kwargs):
"""saves one or a list of reports using the TEMPLATE_PERSIST connector contract. Though a report can be of any
name, for convention and consistency each component has a set of REPORT constants <Component>.REPORT_<NAME>
where <Component> is the component Class name and <name> is the name of the report_canonical.
The reports can be a simple string name or a list of names. The name list can be a string or a dictionary
providing more detailed parameters on how to represent the report. These parameters keys are
:key report: the name of the report
:key file_type: (optional) a file type other than the default .json
:key versioned: (optional) if the filename should be versioned
:key stamped: (optional) A string of the timestamp options ['days', 'hours', 'minutes', 'seconds', 'ns']
Some examples
self.REPORT_SCHEMA
[self.REPORT_NOTES, self.REPORT_SCHEMA]
[self.REPORT_NOTES, {'report': self.REPORT_SCHEMA, 'uri_file': '<file_name>'}]
[{'report': self.REPORT_NOTES, 'file_type': 'json'}]
[{'report': self.REPORT_SCHEMA, 'file_type': 'csv', 'versioned': True, 'stamped': days}]
:param reports: a report name or list of report names to save
:param report_canonical: a relating canonical to base the report on
:param auto_connectors: (optional) if a connector should be created automatically
:param replace_connectors: (optional) replace any existing report connectors with these reports
:param save: (optional) if True, save to file. Default is True
:param kwargs: additional kwargs to pass to a Connector Contract
"""
if not isinstance(reports, (str, list)):
raise TypeError(f"The reports type must be a str or list, {type(reports)} type passed")
auto_connectors = auto_connectors if isinstance(auto_connectors, bool) else True
replace_connectors = replace_connectors if isinstance(replace_connectors, bool) else False
_report_list = []
for _report in self.pm.list_formatter(reports):
if not isinstance(_report, (str, dict)):
raise TypeError(f"The report type {type(_report)} is an unsupported type. Must be string or dict")
if isinstance(_report, str):
_report = {'report': _report}
if not _report.get('report', None):
raise ValueError(f"if not a string the reports list dict elements must have a 'report' key")
_report_list.append(_report)
if replace_connectors:
self.set_report_persist(reports=_report_list, save=save)
for _report in _report_list:
connector_name = _report.get('report')
if not self.pm.has_connector(connector_name):
if auto_connectors:
self.set_report_persist(reports=[_report], save=save)
else:
continue
self.persist_canonical(connector_name=connector_name, canonical=report_canonical, **kwargs)
return
def save_canonical_schema(self, schema_name: str=None, canonical: pd.DataFrame=None, schema_tree: list=None,
exclude_associate: list=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, save: bool=None):
""" Saves the canonical schema to the Property contract. The default loads the clean canonical but optionally
a canonical can be passed to base the schema on and optionally a name given other than the default
:param schema_name: (optional) the name of the schema to save
:param canonical: (optional) the canonical to base the schema on
:param schema_tree: (optional) an analytics dict (see Discovery.analyse_association(...)
:param exclude_associate: (optional) a list of dot notation tree of items to exclude from iteration
(e.g. ['age.gender.salary'] will cut 'salary' branch from gender and all sub branches)
:param detail_numeric: (optional) if numeric columns should have detail stats, slowing analysis. default False
:param strict_typing: (optional) stops objects and string types being seen as categories. default True
:param category_limit: (optional) a global cap on categories captured. default is 10
:param save: (optional) if True, save to file. Default is True
"""
schema_name = schema_name if isinstance(schema_name, str) else self.REPORT_SCHEMA
canonical = canonical if isinstance(canonical, pd.DataFrame) else self.load_persist_canonical()
schema_tree = schema_tree if isinstance(schema_tree, list) else canonical.columns.to_list()
detail_numeric = detail_numeric if isinstance(detail_numeric, bool) else False
strict_typing = strict_typing if isinstance(strict_typing, bool) else True
category_limit = category_limit if isinstance(category_limit, int) else 10
analytics = DataDiscovery.analyse_association(canonical, columns_list=schema_tree,
exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
self.pm.set_canonical_schema(name=schema_name, schema=analytics)
self.pm_persist(save=save)
return
@staticmethod
def canonical_report(canonical, stylise: bool=True, inc_next_dom: bool=False, report_header: str=None,
condition: str=None):
"""The Canonical Report is a data dictionary of the canonical providing a reference view of the dataset's
attribute properties
:param canonical: the DataFrame to view
:param stylise: if True present the report stylised.
:param inc_next_dom: (optional) if to include the next dominate element column
:param report_header: (optional) filter on a header where the condition is true. Condition must exist
:param condition: (optional) the condition to apply to the header. Header must exist. examples:
' > 0.95', ".str.contains('shed')"
:return:
"""
return DataDiscovery.data_dictionary(df=canonical, stylise=stylise, inc_next_dom=inc_next_dom,
report_header=report_header, condition=condition)
def report_canonical_schema(self, schema: [str, dict]=None, roots: [str, list]=None,
sections: [str, list]=None, elements: [str, list]=None, stylise: bool=True):
""" presents the current canonical schema
:param schema: (optional) the name of the schema
:param roots: (optional) one or more tree roots
:param sections: (optional) the section under the root
:param elements: (optional) the element in the section
:param stylise: if True present the report stylised.
:return: pd.DataFrame
"""
if not isinstance(schema, dict):
schema = schema if isinstance(schema, str) else self.REPORT_SCHEMA
if not self.pm.has_canonical_schema(name=schema):
raise ValueError(f"There is no Schema currently stored under the name '{schema}'")
schema = self.pm.get_canonical_schema(name=schema)
df = pd.DataFrame(columns=['root', 'section', 'element', 'value'])
root_list = DataAnalytics.get_tree_roots(analytics_blob=schema)
if isinstance(roots, (str, list)):
roots = Commons.list_formatter(roots)
for root in roots:
if root not in root_list:
raise ValueError(f"The root '{root}' can not be found in the analytics tree roots")
root_list = roots
for root_items in root_list:
data_analysis = DataAnalytics.from_root(analytics_blob=schema, root=root_items)
for section in data_analysis.section_names:
if isinstance(sections, (str, list)):
if section not in Commons.list_formatter(sections):
continue
for element, value in data_analysis.get(section).items():
if isinstance(elements, (str, list)):
if element not in Commons.list_formatter(elements):
continue
to_append = [root_items, section, element, value]
a_series =
|
pd.Series(to_append, index=df.columns)
|
pandas.Series
|
#pip install streamlit
#pip install pandas
#pip install sklearn
# IMPORT STATEMENTS
import streamlit as st
import pandas as pd
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import plotly.figure_factory as ff
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import seaborn as sns
df = pd.read_csv(r'C:\Users\PrasadRecharla_DataScience\DataScience_LiveProjects\ML_Projects\diabetes_prediction_kaggle\diabetes.csv')
#
# HEADINGS
st.title('Diabetes Checkup')
st.sidebar.header('Patient Data')
st.subheader('Training Data Stats')
st.write(df.describe())
# X AND Y DATA
x = df.drop(['Outcome'], axis = 1)
y = df.iloc[:, -1]
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.2, random_state = 0)
# FUNCTION
def user_report():
pregnancies = st.sidebar.slider('Pregnancies', 0,19, 3 )
glucose = st.sidebar.slider('Glucose', 0,450, 120 )
bp = st.sidebar.slider('Blood Pressure', 0,250, 70 )
skinthickness = st.sidebar.slider('Skin Thickness', 0,100, 20 )
insulin = st.sidebar.slider('Insulin', 0,846, 79 )
bmi = st.sidebar.slider('BMI', 0,67, 20 )
dpf = st.sidebar.slider('Diabetes Pedigree Function', 0.0,2.4, 0.47 )
age = st.sidebar.slider('Age', 15,88, 33 )
user_report_data = {
'pregnancies':pregnancies,
'glucose':glucose,
'bp':bp,
'skinthickness':skinthickness,
'insulin':insulin,
'bmi':bmi,
'dpf':dpf,
'age':age
}
report_data =
|
pd.DataFrame(user_report_data, index=[0])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""Recommender_System.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_yvJ9w2fZE6sxmTSjig7LhQhWl0HOG8p
# Importing data and lemmatizing the data
"""
import numpy as np
import pandas as pd
import re
import scipy
import math
URL = 'https://drive.google.com/file/d/137eW4F35OctoRuq5DasVUXw6GpmfXdBS/view?usp=sharing'
path = 'https://drive.google.com/uc?export=download&id='+URL.split('/')[-2]
#df = pd.read_pickle(path)
data = pd.read_csv(path, skip_blank_lines=True)
pd.set_option('display.max_colwidth', None)
print(data.shape)
data.drop_duplicates(subset='content', inplace=True, ignore_index=True)
data.shape
data.head(1)
data[data['_id'] == '6076fadb0b3e8bc9b779293e']['_id'].to_string()
def make_lower_case(text):
return text.lower()
import re
from pprint import pprint
import nltk, spacy, gensim
from sklearn.feature_extraction.text import CountVectorizer
def get_lemmatized_clean_data(df):
# Convert to list
data = df.content.tolist()
# Remove Emails
data = [re.sub('\S*@\S*\s?', '', sent) for sent in data]
# Remove new line characters
data = [re.sub('\s+', ' ', sent) for sent in data]
# Remove distracting single quotes
data = [re.sub("\'", "", sent) for sent in data]
# pprint(data[:1])
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(data))
# print(data_words[:1])
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
return texts_out
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# Run in terminal: python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
return data_lemmatized
X = get_lemmatized_clean_data(data)
max_time = []
for i in X:
max_time.append(len(i.split(' '))/2.5)
data['Max_Time'] = max_time
data.head()
"""# SKlearn NewsData Import"""
from sklearn.datasets import fetch_20newsgroups
newsgroups_train = fetch_20newsgroups(subset='train')
def get_data(mydata):
mydata.keys()
df = pd.DataFrame([mydata['data'],[mydata['target_names'][idx] for idx in mydata['target']],mydata['target']])
df = df.transpose()
df.columns = ['content', 'target_names', 'target']
return df
df = get_data(newsgroups_train)
df.head()
news = data.drop(axis = 1, columns=['_id', 'y',]).to_numpy()
data_lemmatized = get_lemmatized_clean_data(df)
df.head()
"""# Converting the data to bag_of_word representation"""
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
my_stopwords = stopwords.words('english')
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
vectorizor = TfidfVectorizer(stop_words=my_stopwords, lowercase= True)
bag_of_words = vectorizor.fit_transform(X)
"""# Content Based Similarity Using TFIDF Vector"""
from numpy import dot
from numpy.linalg import norm
def similarity(a,b):
cos_sim = dot(a, b)/(norm(a)*norm(b))
return cos_sim
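# A small sanity-check sketch for the cosine similarity helper above; the toy
# vectors are assumptions chosen so the expected values are easy to verify by hand.
def _example_similarity():
    a = np.array([1.0, 0.0])
    b = np.array([0.0, 1.0])
    c = np.array([2.0, 0.0])
    assert abs(similarity(a, b) - 0.0) < 1e-9  # orthogonal vectors -> 0
    assert abs(similarity(a, c) - 1.0) < 1e-9  # parallel vectors -> 1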
def ContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(bag_of_words[news_index].todense())[0]
b = np.array(bag_of_words[an_index].todense())[0]
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
ContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in ContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# Content Based Similarity Using SVD
"""
# Performing SVD
svd = TruncatedSVD(n_components=50)
lsa = svd.fit_transform(bag_of_words)
def SVDContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(lsa[news_index])
b = np.array(lsa[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
SVDContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in SVDContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# LDA Implementation
"""
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
lda = LatentDirichletAllocation(learning_method='batch', n_jobs=-1)
bag_of_words.T
# LDA Cross-Validation
n_components = [20, 50, 70]
learning_decay = [0.5, 0.7, 0.8]
params = {'n_components': n_components, 'learning_decay': learning_decay}
model = GridSearchCV(lda, param_grid=params)
model.fit(bag_of_words.T)
best_params = model.best_estimator_
best_params
lda_res = best_params.components_.T
lda_res.shape
import pickle
pickle_file = 'lda_cross_validation_rev.pkl'
with open(pickle_file, 'wb') as file:
pickle.dump(model, file)
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# get the folder id where you want to save your file
file = drive.CreateFile({'parents':[{u'id': '19AI35wfuabh1JQ6b1Z3YH5uJ6uL3N6BD'}]})
file.SetContentFile(pickle_file)
file.Upload()
with open(pickle_file, 'rb') as file:
lda_pkl_model = pickle.load(file)
def LDAContentBasedFiltering(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(lda_res[news_index])
b = np.array(lda_res[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
LDAContentBasedFiltering('6076fadb0b3e8bc9b779293e')
for keys in LDAContentBasedFiltering('6076fadb0b3e8bc9b779293e'):
print(data[data['_id'] == keys[0]]['title'])
"""# Word embedding using Glove Vectorizor"""
from google.colab import drive
drive.mount('/content/drive')
from numpy import array
from numpy import asarray
from numpy import zeros
embeddings_dictionary = dict()
glove_file = open('/content/drive/MyDrive/NLP and Text Analysis/glove.6B/glove.6B.100d.txt', encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
i = 0
glov_stop = []
news_embedding_dict = dict()
for word in vectorizor.vocabulary_.keys():
if word in embeddings_dictionary:
news_embedding_dict[word] = embeddings_dictionary[word]
else:
glov_stop.append(word)
stopset = set(nltk.corpus.stopwords.words('english'))
new_stopwords_list = stopset.union(glov_stop)
vectorizor_glov = TfidfVectorizer(stop_words=new_stopwords_list)
glov_bag_of_words = vectorizor_glov.fit_transform(X)
y = np.array([val for (key, val) in news_embedding_dict.items()])
y.shape
glov_bag_of_words.shape
document_embedding = glov_bag_of_words*y
document_embedding.shape
document_embedding
def ContentBasedFilteringWordEmbedding(id, first_n = 10):
similarity_dic = {}
news_index = data[data['_id']==id].index[0]
for i in data['_id']:
an_index = data[data['_id']==i].index[0]
a = np.array(document_embedding[news_index])
b = np.array(document_embedding[an_index])
similarity_dic[i] = similarity(a, b)
sorted_most_similar = sorted(similarity_dic.items(), key =
lambda kv:(kv[1], kv[0]), reverse=True)
return sorted_most_similar[:first_n]
ContentBasedFilteringWordEmbedding('6076fadb0b3e8bc9b779293e')
"""# Collaborative Filtering"""
topics = ['cricket', 'football', 'golf', 'asia', 'africa', 'europe', 'americas', 'style', 'tech', 'science', 'hollywood', 'us politics', 'stock market', 'travel', 'coronavirus', 'black lives matter']
from random import sample
class User:
def __init__(self, id):
self.id = id
self.prefered_categories = sample(topics, np.random.randint(low=3, high= 5))
self.no_of_articles_served = np.random.randint(10, 50)*10
self.no_of_sessions = math.ceil((self.no_of_articles_served)/10)
self.ids = [self.id for _ in range(self.no_of_articles_served)]
self.sessions = []
self.articles_served = []
self.ratings = []
self.click = []
self.ranks = []
j = math.ceil(self.no_of_articles_served*0.7)
for m in range(j):
id_temp = np.random.choice(data[data['topics'].isin(self.prefered_categories)]['_id'])
self.articles_served.append(id_temp)
click = np.random.binomial(1, 0.7,1)[0]
self.click.append(click)
self.ratings.append('-' if click == 0 else np.random.randint((data[data['_id'] == id_temp]['Max_Time'])/4, data[data['_id'] == self.articles_served[m]]['Max_Time'])[0])
j = self.no_of_articles_served-j
for m in range(j):
id_temp = np.random.choice(data[~data['topics'].isin(self.prefered_categories)]['_id'])
self.articles_served.append(id_temp)
click = np.random.binomial(1, 0.1,1)[0]
self.click.append(click)
self.ratings.append('-' if click == 0 else np.random.randint(0, data[data['_id'] == id_temp]['Max_Time'])[0])
for i in range(self.no_of_sessions):
for k in range(10):
self.sessions.append(i)
self.ranks.append(k)
new_user = User(1)
data[data['_id'].isin(new_user.articles_served)].tail(10)
def CreateRandomUserProfiler(max_no_user = 40):
Users = []
for i in range(max_no_user):
Users.append(User(i))
print(Users[i-1].prefered_categories)
UserProfiler = pd.DataFrame(columns=['UserId', 'SessionID', 'ArticleID Served', 'Article Rank', 'Click', 'Time Spent'])
for user in Users:
df = pd.DataFrame()
df['UserId'] = user.ids
df['SessionID'] = user.sessions
df['ArticleID Served'] = user.articles_served
df['Article Rank'] = user.ranks
df['Click'] = user.click
df['Time Spent'] = user.ratings
UserProfiler =
|
pd.concat([UserProfiler,df], ignore_index=True)
|
pandas.concat
|
"""
Set of helper functions for common actions on our data frames
"""
import glob
import os
import numpy as np
import pandas as pd
from commons import consts
ALL = 'ALL'
NC_AND_PT = "PT"
ONLY_PT = "ONLY_PT"
NOT_NC = "ALL_CANCER"
LN_AND_PT = "LN_AND_PT"
ONLY_NC = "NC"
def remove_extreme_cpgs_by_coverage(df, top_low_level_to_remove=5):
"""
    Remove CpGs which are extreme in coverage from a data frame
:param df: The data frame
:type df: pd.DataFrame
    :param top_low_level_to_remove: The percentage of CpGs to remove from the top and bottom, keeping in mind that
        this is percentage based and not value based, meaning that if every value is 3 we still only remove
        top_low_level_to_remove*2 % of the values and not all of them
:type top_low_level_to_remove: int
:return: A new df with the cpg we want to keep
"""
cpg_coverage = np.sum(~pd.isnull(df), axis=0)
cpg_coverage = cpg_coverage.sort_values()
cpg_s = cpg_coverage.shape[0]
n_to_remove = int(cpg_s * top_low_level_to_remove / 100)
cpg_to_keep = cpg_coverage.index[n_to_remove:-n_to_remove]
    return df[cpg_to_keep]  # this removes the top_low_level_to_remove % lowest- and highest-coverage CpGs
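# A hedged usage sketch of remove_extreme_cpgs_by_coverage on a toy frame; the CpG
# column names and the NaN pattern below are assumptions for illustration only.
def _example_remove_extreme_cpgs_by_coverage():
    toy = pd.DataFrame({"cpg%d" % i: [np.nan] * i + [1.0] * (10 - i) for i in range(10)})
    # With top_low_level_to_remove=10 and 10 columns, exactly one column is cut
    # from each end of the coverage distribution (the best- and worst-covered CpG).
    trimmed = remove_extreme_cpgs_by_coverage(toy, top_low_level_to_remove=10)
    return trimmed.columns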
def get_nc_avg(chromosome, cpg_indexes, nc_meth_files):
"""
    Get the average methylation values in the original (NC) cells, based on a file already created
    :param chromosome: The chromosome we are looking at
:param cpg_indexes: The cpg indexes
:param nc_meth_files: The nc files path
:return: The average methylation in those cells
"""
nc_file = glob.glob(os.path.join(nc_meth_files, "*chr%s.dummy*" % chromosome))[0]
df = pd.read_pickle(nc_file)
mdf = df.mean(axis=1)
return mdf.loc[cpg_indexes]
def cpg_meth_in_cells(patient, chromosome, cpgs_indexes, meth_files_path, sublineage_name):
"""
    Get the methylation ratio of a patient in a chromosome for a specific set of cells / sublineage_name
    :param patient: The patient we are looking at
    :param chromosome: The chromosome we are looking at
    :param cpgs_indexes: The list of cpgs
    :param meth_files_path: The path for the patient files
    :param sublineage_name: The sublineage name or cell type name
    :return: A df with the indexes matching those cell types
"""
methylation_file_path = os.path.join(meth_files_path, patient, consts.SCWGBS_FILE_FORMAT %
(patient, chromosome))
df =
|
pd.read_pickle(methylation_file_path)
|
pandas.read_pickle
|
"""
Download data from jqdatasdk.
The launch should be in the morning; this is only for a personal account.
Update runs on Saturday at 7:00.
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import jqdatasdk as jq
import pickle
from datetime import datetime, timedelta
import time
import pytz
import pandas as pd
import pendulum
import jqdata.jqauth as jqauth
from util.util import haunter
LOG = haunter("nightly_finance")
BASELINE = "000001.XSHG" # SZZS - 000001
CACHE_NIGHTLY = os.path.join(BASE_DIR,"cache","nightly")
def pull_finance():
"""
"""
clock = pendulum.now("Asia/Shanghai")
if clock.month < 5:
finance_year = clock.year - 2
else:
finance_year = clock.year - 1
### get ticker_list
ticker_df = jq.get_all_securities(types=['stock'], date=None)
ticker_list = ticker_df.index.tolist()
## download finance
df_indicator = jq.get_fundamentals(jq.query(
jq.indicator.roe,
jq.indicator.inc_total_revenue_year_on_year,
jq.indicator.code,
jq.indicator.pubDate,
jq.indicator.statDate
).filter(
jq.valuation.code.in_(ticker_list)
), statDate=finance_year)
df_valuation = jq.get_fundamentals(jq.query(
jq.valuation.code,
jq.valuation.day,
jq.valuation.pe_ratio,
jq.valuation.pe_ratio_lyr,
jq.valuation.market_cap,
jq.valuation.circulating_cap,
jq.valuation.circulating_market_cap
).filter(
jq.valuation.code.in_(ticker_list)
), date=None)
df =
|
pd.merge(df_indicator,df_valuation,on='code')
|
pandas.merge
|
# Create class for new dataframe
import pandas as pd
from sklearn.model_selection import train_test_split
class NewDF():
def __init__(self, df, target, column):
self.df = df
self.target = target
self.column = column
@property
def ToDF(self):
self.df =
|
pd.DataFrame(self.df)
|
pandas.DataFrame
|
"""Helper scientific module
Module provides custom methods to support the Customer Journey Analytics project
"""
# IMPORTS
# -------
# Standard libraries
import re
import ipdb
import string
import math
# 3rd party libraries
from google.cloud import bigquery
import numpy as np
import pandas as pd
import nltk
nltk.download(['wordnet', 'stopwords'])
STOPWORDS = nltk.corpus.stopwords.words('english')
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scikit_posthocs as sp
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
import rpy2
import rpy2.rlike.container as rlc
from rpy2 import robjects
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.vectors import ListVector
from rpy2.robjects.vectors import StrVector
from rpy2.robjects import pandas2ri
from matplotlib import pyplot as plt
import seaborn as sns
# MODULE FUNCTIONS
# ----------------
def get_dissimilarity(df, normalize=True):
'''Calculates dissimilarity of observations from average
observation.
Args:
df: Data as dataframe of shape (# observations, # variables)
Returns:
        ser: Calculated dissimilarity as series of size (# observations)
'''
# normalize data
if normalize:
df_scaled = StandardScaler().fit_transform(df)
df = pd.DataFrame(df_scaled, columns=df.columns, index=df.index)
else:
raise Exception('Not implemented')
# calculate multivariate dissimilarity
diss = ((df - df.mean())**2).sum(axis=1)**(1/2)
return diss
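# A toy sketch of get_dissimilarity; the three-observation frame is an assumption,
# chosen so the middle row sits exactly on the variable means and scores ~0.
def _example_get_dissimilarity():
    toy = pd.DataFrame({"x": [0.0, 1.0, 2.0], "y": [10.0, 20.0, 30.0]})
    diss = get_dissimilarity(toy)
    # After standardization the middle observation equals the average observation,
    # so its Euclidean distance from the mean profile is zero.
    assert abs(diss.iloc[1]) < 1e-9
    return diss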
def split_data(df, diss_var, dataset_names, threshold, dis_kws={}, **split_kws):
    '''Function randomly splits data into two sets, calculates multivariate
    dissimilarity and keeps all outliers determined by the dissimilarity
    threshold in each set.
    Args:
        df: Data as dataframe of shape (# samples, # features)
        diss_var: Names of variables to calculate dissimilarity measure
            as list of strings
        dataset_names: Names of datasets as list of strings
        threshold: Threshold for dissimilarity measure
            to determine outliers as float
        dis_kws: Key word arguments of dissimilarity function as dictionary
        split_kws: Key word arguments of train_test_split function
    Returns:
        datasets: Dictionary of split datasets as dataframes
'''
# calculate dissimilarity series
dis_kws['normalize'] = (True if 'normalize' not in dis_kws
else dis_kws['normalize'])
dissimilarity = get_dissimilarity(df[diss_var], dis_kws['normalize'])
# Pop outlier customers
ext_mask = (dissimilarity > threshold)
X_ext = df.loc[ext_mask]
X = df.loc[~ext_mask]
# drop one random sample to keep even samples in dataset
# for purpose of having same number of samples after splitting
if X.shape[0] % 2 != 0:
split_kws['random_state'] = (1 if 'random_state' not in split_kws
else split_kws['random_state'])
remove_n = 1
drop_indices = (X.sample(remove_n,
random_state=split_kws['random_state'])
.index)
X = X.drop(drop_indices)
# Random split of sample in two groups
Xa, Xb = train_test_split(X, **split_kws)
datasets = [Xa, Xb]
# add outliers to each group
datasets = {dataset_name: dataset
for dataset_name, dataset in zip(dataset_names, datasets)}
for name, dataset in datasets.items():
datasets[name] = dataset.append(X_ext)
return datasets
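# A hedged usage sketch for split_data; the toy RFM-like frame and the split
# arguments are assumptions chosen only to show the calling convention
# (a pandas version with DataFrame.append is assumed, as split_data relies on it).
def _example_split_data():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame(rng.normal(size=(100, 3)),
                       columns=["recency", "frequency", "monetary"])
    datasets = split_data(toy,
                          diss_var=["recency", "frequency", "monetary"],
                          dataset_names=["groupA", "groupB"],
                          threshold=3.0,
                          test_size=0.5,
                          random_state=1)
    # Both halves end up with the same extreme observations (dissimilarity > 3.0).
    return {name: part.shape for name, part in datasets.items()}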
def analyze_cluster_solution(df, vars_, labels, **kws):
'''Analyzes cluster solution. Following analyses are done:
1) Hypothesis testing of clusters averages difference
a) One way ANOVA
b) ANOVA assumptions
- residuals normality test: Shapiro-Wilk test
                - equal variances test: Levene's test
c) Kruskal-Wallis non parametric test
d) All-Pair non parametric test, Conover test by default
        2) Cluster profile visualization
        3) Cluster scatterplot visualization
Args:
df: Dataset as pandas dataframe
of shape(# observations, # variables)
vars_: Clustering variables as list of strings
labels: Variable holding cluster labels as string
kws: Key words arguments of post-hoc test
Returns:
summary: Dataframe of hypothesis tests
post_hoc: List of post_hoc test for each clustering variable
        prof_ax: Axes of profile visualization
        clst_pg: PairGrid of cluster visualization
'''
def color_not_significant_red(val, signf=0.05):
        '''Takes a scalar and returns a string with the css property
        `'color: red'` for a non-significant p_value
'''
color = 'red' if val > signf else 'black'
return 'color: %s' % color
# get number of seeds
num_seeds = len(df.groupby(labels).groups)
# run tests
kws['post_hoc_fnc'] = (sp.posthoc_conover if 'post_hoc_fnc' not in kws
else kws['post_hoc_fnc'])
summary, post_hoc = profile_cluster_labels(
df, labels, vars_, **kws)
# print hypothesis tests
str_ = 'PROFILE SUMMARY FOR {}'.format(labels.upper())
print(str_ + '\n' + '-' * len(str_) + '\n')
str_ = 'Hypothesis testing of clusters averages difference'
print(str_ + '\n' + '-' * len(str_))
display(summary.round(2))
# print post-hoc tests
str_ = '\nPost-hoc test: {}'.format(kws['post_hoc_fnc'].__name__)
print(str_ + '\n' + '-' * len(str_) + '\n')
for var in post_hoc:
print('\nclustering variable:', var)
display(post_hoc[var].round(2)
.style.applymap(color_not_significant_red))
# print profiles
str_ = '\nProfile vizualization'
print(str_ + '\n' + '-' * len(str_))
prof_ax = (df
.groupby(labels)
[vars_]
.mean()
.transpose()
.plot(title='Cluster Profile')
)
plt.ylabel('Standardized scale')
plt.xlabel('Clustering variables')
plt.show()
# print scatterplots
str_ = '\nClusters vizualization'
print(str_ + '\n' + '-' * len(str_))
clst_pg = sns.pairplot(x_vars=['recency', 'monetary'],
y_vars=['frequency', 'monetary'],
hue=labels, data=df, height=3.5)
clst_pg.set(yscale='log')
clst_pg.axes[0, 1].set_xscale('log')
clst_pg.fig.suptitle('Candidate Solution: {} seeds'
.format(num_seeds), y=1.01)
plt.show()
return summary, post_hoc, prof_ax, clst_pg
def profile_cluster_labels(df, group, outputs, post_hoc_fnc=sp.posthoc_conover):
    '''Test distinctiveness of cluster (group) labels across clustering (output)
    variables using one way ANOVA, Shapiro-Wilk normality test,
    Levene's test of equal variances, Kruskal-Wallis non parametric tests and
    a selected all-pairs post hoc test for each output variable.
Args:
df: Data with clustering variables and candidate solutions
as dataframe of shape (# samples, # of variables +
candidate solutions)
group: group variables for hypothesis testing as string
output: output variables for hypothesis testing as list of string
Returns:
results: Dataframe of hypothesis tests for each output
'''
# initiate summmary dataframe
summary = (df.groupby(group)[outputs]
.agg(['mean', 'median'])
.T.unstack(level=-1)
.swaplevel(axis=1)
.sort_index(level=0, axis=1))
# initiate posthoc dictionary
post_hoc = {}
# cycle over ouptputs
for i, output in enumerate(outputs):
# split group levels
levels = [df[output][df[group] == level]
for level in df[group].unique()]
# calculate F statistics and p-value
_, summary.loc[output, 'anova_p'] = stats.f_oneway(*levels)
# calculate leven's test for equal variances
_, summary.loc[output, 'levene_p'] = stats.levene(*levels)
# check if residuals are normally distributed by shapiro wilk test
model = ols('{} ~ C({})'.format(output, group), data=df).fit()
_, summary.loc[output, 'shapiro_wilk_p'] = stats.shapiro(model.resid)
# calculate H statistics and p-value for Kruskal Wallis test
_, summary.loc[output, 'kruskal_wallis_p'] = stats.kruskal(*levels)
# multiple comparison Conover's test
post_hoc[output] = post_hoc_fnc(
df, val_col=output, group_col=group) #, p_adjust ='holm')
return summary, post_hoc
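# A minimal sketch of profile_cluster_labels on synthetic clusters, mirroring the
# call made in analyze_cluster_solution above; the toy data and the default
# Conover post-hoc test are assumptions for illustration only.
def _example_profile_cluster_labels():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame({
        "monetary": np.concatenate([rng.normal(0, 1, 50), rng.normal(3, 1, 50)]),
        "frequency": np.concatenate([rng.normal(0, 1, 50), rng.normal(1, 1, 50)]),
        "cluster": [0] * 50 + [1] * 50,
    })
    summary, post_hoc = profile_cluster_labels(toy, group="cluster",
                                               outputs=["monetary", "frequency"])
    # summary is meant to hold per-cluster means/medians plus ANOVA, Levene,
    # Shapiro-Wilk and Kruskal-Wallis p-values; post_hoc holds one all-pairs
    # test matrix per clustering variable.
    return summary, post_hoc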
def get_missmatch(**kws):
'''
Cross tabulates dataframe on 2 selected columns and
calculates missmatch proportion of rows and total
Args:
kws: Key word arguments to pd.crosstab function
Returns:
crosst_tab: result of cross tabulation as dataframe
missmatch_rows: missmatch proportion by rows as series
total_missmatch: total missmatch proportion as float
'''
cross_tab = pd.crosstab(**kws)
missmatch_rows = (cross_tab.sum(axis=1) - cross_tab.max(axis=1))
total_missmatch = missmatch_rows.sum() / cross_tab.sum().sum()
missmatch_rows = missmatch_rows / cross_tab.sum(axis=1)
missmatch_rows.name = 'missmatch_proportion'
return cross_tab, missmatch_rows, total_missmatch
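# A small sketch of get_missmatch, cross-tabulating two hypothetical cluster label
# columns; the toy assignments below are assumptions for illustration only.
def _example_get_missmatch():
    toy = pd.DataFrame({"solution_a": [0, 0, 0, 1, 1, 1],
                        "solution_b": [0, 0, 1, 1, 1, 0]})
    cross_tab, missmatch_rows, total_missmatch = get_missmatch(
        index=toy["solution_a"], columns=toy["solution_b"])
    # One of the three observations in each row falls outside the majority cell,
    # so total_missmatch comes out as 2/6.
    return cross_tab, missmatch_rows, total_missmatch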
def query_product_info(client, query_params):
'''Query product information from bigquery database.
Distinct records of product_sku, product_name,
product_brand, product_brand_grp,
product_category, product_category_grp,
Args:
client: Instatiated bigquery.Client to query distinct product
description(product_sku, product_name, product_category,
product_category_grp)
query_params: Query parameters for client
Returns:
product_df: product information as distict records
as pandas dataframe (# records, # variables)
'''
# Check arguments
# ----------------
assert isinstance(client, bigquery.Client)
assert isinstance(query_params, list)
# Query distinct products descriptions
# ------------------------------------
query='''
SELECT DISTINCT
hits_product.productSku AS product_sku,
hits_product.v2productName AS product_name,
hits_product.productBrand AS product_brand,
hits.contentGroup.contentGroup1 AS product_brand_grp,
hits_product.v2productCategory AS product_category,
hits.contentGroup.contentGroup2 AS product_category_grp
FROM
`bigquery-public-data.google_analytics_sample.ga_sessions_*`
LEFT JOIN UNNEST(hits) AS hits
LEFT JOIN UNNEST(hits.product) AS hits_product
WHERE
_TABLE_SUFFIX BETWEEN @start_date AND @end_date
AND hits_product.productSku IS NOT NULL
ORDER BY
product_sku
'''
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
df = client.query(query, job_config=job_config).to_dataframe()
return df
def reconstruct_brand(product_sku, df):
'''Reconstructs brand from product name and brand variables
Args:
product_sku: product_sku as of transaction records on product level
of size # transactions on produc level
df: Product information as output of
helper.query_product_info in form of dataframe
of shape (# of distinct records, # of variables)
Returns:
recon_brand: reconstructed brand column as pandas series
of size # of transactions
'''
# predict brand name from product name for each sku
# -------------------------------------------------
# valid brands
brands = ['Android',
'Chrome',
r'\bGo\b',
'Google',
'Google Now',
'YouTube',
'Waze']
# concatenate different product names for each sku
brand_df = (df[['product_sku', 'product_name']]
.drop_duplicates()
.groupby('product_sku')
['product_name']
.apply(lambda product_name: ' '.join(product_name))
.reset_index()
)
# drop (no set) sku's
brand_df = brand_df.drop(
index=brand_df.index[brand_df['product_sku'] == '(not set)'])
# predict brand name from product name for each sku
brand_df['recon_brand'] = (
brand_df['product_name']
.str.extract(r'({})'.format('|'.join(set(brands)),
flags=re.IGNORECASE))
)
    # adjust brand taking into account spelling errors in product names
brand_df.loc[
brand_df['product_name'].str.contains('You Tube', case=False),
'recon_brand'
] = 'YouTube'
# predict brand name from brand variables for sku's where
# brand couldn't be predected from product name
# --------------------------------------------------------
# get distinct product_sku and brand variables associations
brand_vars = ['product_brand', 'product_brand_grp']
brand_var = dict()
for brand in brand_vars:
brand_var[brand] = (df[['product_sku', brand]]
.drop(index=df.index[(df['product_sku'] == '(not set)')
| df['product_sku'].isna()
| (df[brand] == '(not set)')
| df[brand].isna()])
.drop_duplicates()
.drop_duplicates(subset='product_sku', keep=False))
    # check for brand ambiguity at sku level
old_brand = brand_var['product_brand'].set_index('product_sku')
new_brand = brand_var['product_brand_grp'].set_index('product_sku')
shared_sku = old_brand.index.intersection(new_brand.index)
if not shared_sku.empty:
        # delete sku's with ambiguous brands
ambigious_sku = shared_sku[
old_brand[shared_sku].squeeze().values
!= new_brand[shared_sku].squeeze().values
]
old_brand = old_brand.drop(index=ambigious_sku, errors='ignore')
new_brand = new_brand.drop(index=ambigious_sku, errors='ignore')
# delete sku's with multiple brands in new_brand
multiple_sku = shared_sku[
old_brand[shared_sku].squeeze().values
== new_brand[shared_sku].squeeze().values
]
new_brand = new_brand.drop(index=multiple_sku, errors='ignore')
# concatenate all associations of brand variables and product sku's
brand_var = pd.concat([old_brand.rename(columns={'product_brand':
'recon_brand_var'}),
new_brand.rename(columns={'product_brand_grp':
'recon_brand_var'})])
# predict brand name from brand variables
brand_df.loc[brand_df['recon_brand'].isna(), 'recon_brand'] = (
pd.merge(brand_df['product_sku'], brand_var, on='product_sku', how='left')
['recon_brand_var']
)
# recode remaining missing (not set) brands by Google brand
# ---------------------------------------------------------
brand_df['recon_brand'] = brand_df['recon_brand'].fillna('Google')
# predict brand from brand names and variables on transaction data
# ----------------------------------------------------------------
recon_brand = (pd.merge(product_sku.to_frame(),
brand_df[['product_sku', 'recon_brand']],
on='product_sku',
how='left')
.reindex(product_sku.index)
['recon_brand'])
return recon_brand
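# A minimal sketch of the brand-extraction idea used in reconstruct_brand: pull a
# known brand token out of a free-text product name with Series.str.extract. The
# product names below are illustrative assumptions only.
def _example_brand_extraction():
    names = pd.Series(["Google Sunglasses", "YouTube Twill Cap", "22 oz Bottle"])
    pattern = r'({})'.format('|'.join(['Google', 'YouTube', 'Android', 'Waze']))
    extracted = names.str.extract(pattern)
    # Names with no recognised brand come back as NaN; reconstruct_brand later
    # fills those with the default 'Google' brand.
    return extracted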
def reconstruct_category(product_sku, df, category_spec):
'''Reconstructs category from category variables and product names.
Args:
product_sku: product_sku from transaction records on product level
of size # transactions on product level
df: Product information as output of
helper.query_product_info in form of dataframe
of shape (# of distinct records, # of variables)
category_spec: Dictionary with keys as category variable names
and values as mappings between category variable levels
to category labels in form of dataframe
Returns:
recon_category: reconstructed category column as pandas series
of size # of transactions on product level
category_df: mappings of unique sku to category labels
'''
# Check arguments
# ----------------
assert isinstance(product_sku, pd.Series)
assert isinstance(df, pd.DataFrame)
assert isinstance(category_spec, dict)
# reconstruct category name from product name for each sku
# --------------------------------------------------------
def get_category_representation(category_label, valid_categories):
'''Handle multiple categories assigned to one sku.
For ambiguous categories returns a missing value.
Args:
category_label: Series of category labels for
particular sku
valid_categories: Index of valid unique categories
Returns:
label: valid category label or missing value
'''
label = valid_categories[valid_categories.isin(category_label)]
if label.empty or label.size > 1:
return np.nan
else:
return label[0]
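# Illustrative worked example (added note, not from the original source): with
# valid_categories = pd.Index(['Apparel', 'Bags']), a sku whose labels are all
# 'Apparel' resolves to 'Apparel', while a sku whose labels span both 'Apparel'
# and 'Bags' is ambiguous and resolves to np.nan.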
def label_category_variable(df, category_var, label_spec):
'''reconstruct category labels from category variable.
Args:
df: Product information dataframe.
category_var: Name of category variable to reconstruct labels
label_spec: Label mapping between category variable levels
and labels.
Returns:
var_label: Label mapping to sku as dataframe
'''
valid_categories = pd.Index(label_spec
.groupby(['category_label'])
.groups
.keys())
var_label = (pd.merge(df[['product_name', category_var]]
.drop_duplicates(),
label_spec,
how='left',
on=category_var)
[['product_name', 'category_label']]
.groupby('product_name')
['category_label']
.apply(get_category_representation,
valid_categories=valid_categories)
.reset_index())
return var_label
def screen_fit_model(data):
'''Screens Naive Bayes Classifiers and selects best model
based on f1 weighted score. Returns fitted model and score.
Args:
data: Text and respective class labels as dataframe
of shape (# samples, [text, labels])
Returns:
model: Best fitted sklearn model
f1_weighted_score: Test f1 weighted score
Note: Following hyperparameters are tested
Algorithm: MultinomialNB, ComplementNB
ngrams range: (1, 1), (1, 2), (1, 3)
binarization: False, True
'''
# vectorize text information in product_name
def preprocessor(text):
# not relevant words
not_relevant_words = ['google',
'youtube',
'waze',
'android']
# transform text to lower case and remove punctuation
text = ''.join([word.lower() for word in text
if word not in string.punctuation])
# tokenize words
tokens = re.split(r'\W+', text)
# Drop not relevant words and lemmatize words
wn = nltk.WordNetLemmatizer()
text = ' '.join([wn.lemmatize(word) for word in tokens
if word not in not_relevant_words + STOPWORDS])
return text
# define pipeline
pipe = Pipeline([('vectorizer', CountVectorizer()),
('classifier', None)])
# define hyperparameters
param_grid = dict(vectorizer__ngram_range=[(1, 1), (1, 2), (1, 3)],
vectorizer__binary=[False, True],
classifier=[MultinomialNB(),
ComplementNB()])
# screen naive Bayes models
grid_search = GridSearchCV(pipe, param_grid=param_grid, cv=5,
scoring='f1_weighted', n_jobs=-1)
# divide dataset into train and test set using stratification
# due to high imbalance of label frequencies
x_train, x_test, y_train, y_test = train_test_split(
data['product_name'],
data['recon_category'],
test_size=0.25,
stratify=data['recon_category'],
random_state=1)
# execute screening and select best model
grid_search.fit(x_train, y_train)
# calculate f1 weighted test score
y_pred = grid_search.predict(x_test)
f1_weighted_score = f1_score(y_test, y_pred, average='weighted')
return grid_search.best_estimator_, f1_weighted_score
# reconstruct category label from category variables
recon_labels = dict()
for var, label_spec in category_spec.items():
recon_labels[var] = (label_category_variable(df, var, label_spec)
.set_index('product_name'))
recon_labels['product_category'][
recon_labels['product_category'].isna()
] = recon_labels['product_category_grp'][
recon_labels['product_category'].isna()
]
# reconstruct category label from product names
valid_categories = pd.Index(category_spec['product_category_grp']
.groupby(['category_label'])
.groups
.keys())
category_df = (pd.merge(df[['product_sku', 'product_name']]
.drop_duplicates(),
recon_labels['product_category'],
how='left',
on = 'product_name')
[['product_sku', 'product_name', 'category_label']]
.groupby('product_sku')
.agg({'product_name': lambda name: name.str.cat(sep=' '),
'category_label': lambda label:
get_category_representation(label, valid_categories)})
.reset_index())
category_df.rename(columns={'category_label': 'recon_category'},
inplace=True)
# associate category from category names and variables on transaction data
# ------------------------------------------------------------------------
recon_category = (pd.merge(product_sku.to_frame(),
category_df[['product_sku', 'recon_category']],
on='product_sku',
how='left')
)
# predict category of transactions where category is unknown
# Multinomial and Complement Naive Bayes model is screened
# and finetuned using 1-grams, 2-grams and 3-grams
# as well as binarization (True or False)
# best model is selected based on maximizing test f1 weighted score
# ----------------------------------------------------------------
# screen best model and fit it on training data
model, f1_weighted_score = screen_fit_model(
category_df[['product_name', 'recon_category']]
.dropna()
)
# predict category labels if model has f1_weighted_score > threshold
f1_weighted_score_threshold = 0.8
if f1_weighted_score < f1_weighted_score_threshold:
raise Exception(
'F1 weighted score of category prediction below threshold {:.2f}'
.format(f1_weighted_score_threshold))
else:
product_name = (pd.merge(recon_category
.loc[recon_category['recon_category'].isna(),
['product_sku']],
category_df[['product_sku', 'product_name']],
how='left',
on='product_sku')
['product_name'])
category_label = model.predict(product_name)
recon_category.loc[recon_category['recon_category'].isna(),
'recon_category'] = category_label
return recon_category['recon_category']
def reconstruct_sales_region(subcontinent):
'''Reconstruct sales region from subcontinent'''
if (pd.isna(subcontinent)
or subcontinent.lower() == '(not set)'):
sales_region = np.nan
elif ('africa' in subcontinent.lower()
or 'europe' in subcontinent.lower()):
sales_region = 'EMEA'
elif ('caribbean' in subcontinent.lower()
or subcontinent.lower() == 'central america'):
sales_region = 'Central America'
elif subcontinent.lower() == 'northern america':
sales_region = 'North America'
elif subcontinent.lower() == 'south america':
sales_region = 'South America'
elif ('asia' in subcontinent.lower()
or subcontinent.lower() == 'australasia'):
sales_region = 'APAC'
else:
raise Exception(
'Can not assign sales region to {} subcontinent'
.format(subcontinent))
return sales_region
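# Hedged usage sketch (illustrative helper with an assumed name, never called by
# the pipeline): spells out the subcontinent-to-sales-region mapping above.
def _example_reconstruct_sales_region():
    assert reconstruct_sales_region('Northern Europe') == 'EMEA'
    assert reconstruct_sales_region('Northern America') == 'North America'
    assert reconstruct_sales_region('Southeast Asia') == 'APAC'
    assert pd.isna(reconstruct_sales_region('(not set)'))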
def reconstruct_traffic_keyword(text):
'''Reconstructs traffic keywords to a simpler representation'''
# if empty rename to not applicable
if pd.isna(text):
text = '(not applicable)'
# if one word with mixed numbers & letters rename to (not relevant)
elif re.search(r'(?=.*\d)(?=.*[A-Z=\-])(?=.*[a-z])([\w=-]+)', text):
text = '(not relevant)'
elif ((text != '(not provided)')
and (re.search('(\s+)', text) is not None)):
# transform text to lower case and remove punctuation
text = ''.join([word.lower() for word in text
if word not in string.punctuation.replace('/', '')])
# tokenize words
tokens = re.split(r'\W+|/', text)
# Drop not relevant words and lemmatize words
wn = nltk.WordNetLemmatizer()
text = ' '.join([wn.lemmatize(word) for word in tokens
if word not in STOPWORDS])
return text
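# Hedged usage sketch (illustrative helper with an assumed name, never called;
# the output for multi-word keywords additionally depends on the module-level
# STOPWORDS list, so only the clear-cut branches are shown):
def _example_reconstruct_traffic_keyword():
    assert reconstruct_traffic_keyword(None) == '(not applicable)'      # empty keyword
    assert reconstruct_traffic_keyword('g2Xr9=ab') == '(not relevant)'  # mixed token
    assert reconstruct_traffic_keyword('(not provided)') == '(not provided)'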
def aggregate_data(df):
'''Encode and aggregate engineered and missing value free data
on client level
Args:
df: engineered and missing value free data as
pandas dataframe of shape (# transaction items, # variables)
Returns:
agg_df: encoded and aggregated dataframe
of shape(# clients, # encoded & engineered variables)
with client_id index
'''
# identifiers
id_vars = pd.Index(
['client_id',
'session_id',
'transaction_id',
'product_sku']
)
# session variables
session_vars = pd.Index(
['visit_number', # avg_visits
'date', # month, week, week_day + one hot encode + sum
'pageviews', # avg_pageviews
'time_on_site', # avg_time_on_site
'ad_campaign', # sum
'source', # one hot encode + sum
'browser', # one hot encode + sum
'operating_system', # one hot encode + sum
'device_category', # one hot encode + sum
'continent', # one hot encode + sum
'subcontinent', # one hot encode + sum
'country', # one hot encode + sum
'sales_region', # one hot encode + sum
'social_referral', # sum
'social_network', # one hot encode + sum
'channel_group'] # one hot encode + sum
)
# group session variables from item to session level
session_df = (df[['client_id',
'session_id',
*session_vars.to_list()]]
.drop_duplicates()
# drop ambiguous region 1 case
.drop_duplicates(subset='session_id'))
# reconstruct month, week and week day variables
# session_df['month'] = session_df['date'].dt.month
# session_df['week'] = session_df['date'].dt.week
session_df['week_day'] = session_df['date'].dt.weekday + 1
session_df = session_df.drop(columns='date')
# encode variables on session level
keep_vars = [
'client_id',
'session_id',
'visit_number',
'pageviews',
'time_on_site',
'social_referral',
'ad_campaign'
]
encode_vars = session_df.columns.drop(keep_vars)
enc_session_df = pd.get_dummies(session_df,
columns=encode_vars.to_list(),
prefix_sep='*')
# remove not relevant encoded variables
enc_session_df = enc_session_df.drop(
columns=enc_session_df.columns[
enc_session_df.columns.str.contains('not set|other')
]
)
# summarize session level variables on customer level
sum_vars = (pd.Index(['social_referral', 'ad_campaign'])
.append(enc_session_df
.columns
.drop(keep_vars)))
client_session_sum_df = (enc_session_df
.groupby('client_id')
[sum_vars]
.sum())
client_session_avg_df = (
enc_session_df
.groupby('client_id')
.agg(avg_visits=('visit_number', 'mean'),
avg_pageviews=('pageviews', 'mean'),
avg_time_on_site=('time_on_site', 'mean'))
)
client_session_df = pd.concat([client_session_avg_df,
client_session_sum_df],
axis=1)
# product level variables
product_vars = pd.Index([
# 'product_name', # one hot encode + sum
'product_category', # one hot encode + sum
'product_price', # avg_product_revenue
'product_quantity', # avg_product_revenue
'hour'] # one hot encoded + sum
)
avg_vars = pd.Index([
'product_price',
'product_quantity'
])
sum_vars = pd.Index([
# 'product_name',
'product_category',
'hour'
])
enc_product_df = pd.get_dummies(df[id_vars.union(product_vars)],
columns=sum_vars,
prefix_sep='*')
# summarize product level variables on customer level
client_product_sum_df = (enc_product_df
.groupby('client_id')
[enc_product_df.columns.drop(avg_vars)]
.sum())
def average_product_vars(client):
d = {}
d['avg_product_revenue'] = ((client['product_price']
* client['product_quantity'])
.sum()
/ client['product_quantity'].sum())
# ipdb.set_trace(context=15)
d['avg_unique_products'] = (client
.groupby('transaction_id')
['product_sku']
.apply(lambda sku: len(sku.unique()))
.mean())
return pd.Series(d, index=['avg_product_revenue',
'avg_unique_products'])
client_product_avg_df = (enc_product_df
.groupby('client_id')
.apply(average_product_vars))
client_product_df = pd.concat([client_product_avg_df,
client_product_sum_df]
, axis=1)
agg_df = pd.concat([client_session_df,
client_product_df],
axis=1)
return agg_df
def do_pca(X_std, **kwargs):
'''# Apply PCA to the data.'''
pca = PCA(**kwargs)
model = pca.fit(X_std)
X_pca = model.transform(X_std)
return pca, X_pca
def scree_pca(pca, plot=False, **kwargs):
'''Investigate the variance accounted for by each principal component.'''
# PCA components
n_pcs = len(pca.components_)
pcs = pd.Index(range(1, n_pcs+1), name='principal component')
# Eigen Values
eig = pca.explained_variance_.reshape(n_pcs, 1)
eig_df = pd.DataFrame(np.round(eig, 2), columns=['eigen_value'], index=pcs)
eig_df['cum_eigen_value'] = np.round(eig_df['eigen_value'].cumsum(), 2)
# Explained Variance %
var = pca.explained_variance_ratio_.reshape(n_pcs, 1)
var_df = pd.DataFrame(np.round(var, 4),
columns=['explained_var'],
index=pcs)
var_df['cum_explained_var'] = (np.round(var_df['explained_var'].cumsum()
/ var_df['explained_var'].sum(), 4))
df = pd.concat([eig_df, var_df], axis=1)
if plot:
# scree plot limit
limit = pd.DataFrame(np.ones((n_pcs, 1)),
columns=['scree_plot_limit'], index=pcs)
ax = (pd.concat([df, limit], axis=1)
.plot(y=['eigen_value', 'explained_var', 'scree_plot_limit'],
title='PCA: Scree test & Variance Analysis', **kwargs)
)
df.plot(y=['cum_explained_var'], secondary_y=True, ax=ax)
return df
def get_pc_num(scree_df, pc_num = None, exp_var_threshold=None,
eig_val_threshold=1):
'''
Selects the optimal number of principal components according to the specified
objective, whether % of explained variance or the eigenvalue criterion
Args:
scree_df: Dataframe as output of scree_pca function
exp_var_threshold: threshold for cumulative % of explained variance
eig_val_threshold: min eigen value, 1 by default
Returns:
pc_num: Number of selected principal components
exp_var: Explained variance by selected components
sum_eig: Sum of eigen values of selected components
'''
# check arguments
assert pc_num is None or pc_num <= scree_df.index.size
assert exp_var_threshold is None or (0 < exp_var_threshold <= 1)
assert 0 < eig_val_threshold < scree_df.index.size
assert pc_num is None or exp_var_threshold is None, \
('''Either number of principal components or minimum variance
explained should be selected''')
if exp_var_threshold:
pcs = scree_df.index[scree_df['cum_explained_var'] <= exp_var_threshold]
elif pc_num:
pcs = scree_df.index[:pc_num]
elif exp_var_threshold is None:
pcs = scree_df.index[scree_df['eigen_value'] > eig_val_threshold]
pc_num = len(pcs)
exp_var = scree_df.loc[pc_num, 'cum_explained_var']
sum_eig = scree_df.loc[[*pcs], 'eigen_value'].sum()
return pc_num, exp_var, sum_eig
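# Hedged usage sketch (illustrative, not part of the original pipeline):
#   scree_df = scree_pca(pca)
#   pc_num, exp_var, sum_eig = get_pc_num(scree_df, exp_var_threshold=0.8)
# keeps the leading components whose cumulative explained variance stays within
# 80% and reports the variance and eigenvalue mass they carry; calling it with
# no thresholds falls back to the eigenvalue > 1 criterion.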
def varimax(factor_df, **kwargs):
'''
varimax rotation of factor matrix
Args:
factor_df: factor matrix as pd.DataFrame with shape
(# features, # principal components)
Return:
rot_factor_df: rotated factor matrix as pd.DataFrame
'''
factor_mtr = df2mtr(factor_df)
varimax = robjects.r['varimax']
rot_factor_mtr = varimax(factor_mtr)
return pandas2ri.ri2py(rot_factor_mtr.rx2('loadings'))
def get_components(df, pca, rotation=None, sort_by='sig_ld',
feat_details=None, plot='None', **kwargs):
'''
Show significant factor loadings depending on sample size
Args:
df: data used for pca as pd.DataFrame
pca: fitted pca object
rotation: if to apply factor matrix rotation, by default None.
sort_by: sort sequence of components, by default according
to the number of significant loadings 'sig_ld'
feat_details: Dictionary of mapped feature details, by default None
plot: 'discrete' plots heatmap enhancing significant loadings
'continuous' plots continous heatmap,
by default None
Returns:
factor_df: factor matrix as pd.DataFrame
of shape (# features, # components)
sig_ld: number of significant loadings across components as
pd.Series of size # components
cross_ld: number of significant loadings across features
(cross loadings) as pd.Series of size # features
'''
# constants
# ---------
maxstr = 100 # number of characters to print
# guidelines for identifying significant factor loadings
# based on sample size. Source: Multivariate Data Analysis. 7th Edition.
factor_ld = np.linspace(0.3, 0.75, 10)
signif_sz = np.array([350, 250, 200, 150, 120, 100, 85, 70, 60, 50])
# loadings significance threshold
ld_sig = factor_ld[len(factor_ld) - (signif_sz <= df.index.size).sum()]
if rotation == 'varimax':
components = varimax(
|
pd.DataFrame(pca.components_.T)
|
pandas.DataFrame
|
################################################################
# ---------- Network Gene Name Conversion Functions ---------- #
################################################################
import requests
import re
import time
import pandas as pd
# Determine if id to be input is a valid gene name (does not contain parentheses or quotations or whitespace)
def exclude_id(name, bad_prefixes=None):
excluded_id_regex = re.compile('[(),\'\"\s\/\|\.<>]+')
# Remove genes that may also have prefixes that we do not want (e.g. CHEBI)
if bad_prefixes:
for prefix in bad_prefixes:
if name.startswith(prefix):
return True
return excluded_id_regex.search(name)
# Remove the naming system prefix, if there is one
def get_identifier_without_prefix(string):
elements = string.split(':')
length = len(elements)
if length == 2:
return str(elements[1])
elif length > 2:
return None
else:
return string
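# Hedged usage sketch (illustrative helper with an assumed name, never called):
def _example_gene_id_cleanup():
    assert get_identifier_without_prefix('UniProt:P04637') == 'P04637'  # prefix stripped
    assert get_identifier_without_prefix('TP53') == 'TP53'              # no prefix
    assert get_identifier_without_prefix('a:b:c') is None               # too many separators
    assert exclude_id('TP53') is None                                   # valid gene name
    assert exclude_id('CHEBI:15422', bad_prefixes=['CHEBI'])            # excluded prefix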
# Construct string for batch query to MyGene.Info v3.0.0 API
def query_constructor(gene_list, exclude_prefixes=None, print_invalid_genes=False):
# Find genes that are valid and return only gene identifiers
valid_query_genes = [get_identifier_without_prefix(
gene) for gene in gene_list if exclude_id(gene, exclude_prefixes) == None]
# Find all genes that have invalid names
invalid_query_genes = [gene for gene in gene_list if exclude_id(
gene, exclude_prefixes) != None]
print(len(valid_query_genes), "Valid Query Genes")
if print_invalid_genes:
print(len(invalid_query_genes), "Invalid Query Genes:")
print(invalid_query_genes)
else:
print(len(invalid_query_genes), "Invalid Query Genes")
# Build string of names to input into MyGene.Info
query_string = ' '.join(valid_query_genes)
return query_string, valid_query_genes, invalid_query_genes
# Function for posting batch query to MyGene.info v3.0.0 API
def query_batch(query_string, tax_id='9606', scopes="symbol, entrezgene, alias, uniprot", fields="symbol, entrezgene"):
query_split = query_string.split(' ')
query_n = len(query_split)
query_time = time.time()
if query_n <= 1000:
data = {'species': tax_id, # Human Only
'scopes': scopes, # Default symbol, entrez, alias, uniprot. Alias often returns more genes than needed, return only highest scoring genes
'fields': fields, # Which gene name spaces to convert to
'q': query_string}
res = requests.post('http://mygene.info/v3/query', data)
json = res.json()
else:
# If the query is too long, we will need to break it up into chunks of 1000 query genes (MyGene.info cap)
if query_n % 1000 == 0:
chunks = query_n // 1000
else:
chunks = (query_n // 1000) + 1
query_chunks = []
for i in range(chunks):
start_i, end_i = i*1000, (i+1)*1000
query_chunks.append(' '.join(query_split[start_i:end_i]))
json = []
for chunk in query_chunks:
data = {'species': '9606', # Human Only
# Default symbol, entrez, alias, uniprot. Alias often returns more genes than needed, return only higest scoring genes
'scopes': "entrezgene, retired",
'fields': "symbol, entrezgene", # Which gene name spaces to convert to
'q': chunk}
res = requests.post('http://mygene.info/v3/query', data)
json = json+res.json()
print(len(json), 'Matched query results')
print('Batch query complete:', round(time.time()-query_time, 2), 'seconds')
return json
# Construct matched queries maps
def construct_query_map_table(query_result, query_genes, display_unmatched_queries=False):
construction_time = time.time()
# Construct DataFrame of matched queries (only keep the results for each query where both symbol and entrez id were mapped)
matched_data, matched_genes = [], []
for match in query_result:
if match.get('entrezgene') and match.get('symbol'):
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), str(match.get('entrezgene'))])
matched_genes.append(match.get('query'))
# Add all other partial mappings or non-mappings to the list
partial_match_genes = [
gene for gene in query_genes if gene not in matched_genes]
partial_match_results = []
for match in query_result:
if match.get('query') in partial_match_genes:
partial_match_results.append(match)
# If there is an entrez gene, we want that in string form, otherwise we want None
if match.get('entrezgene'):
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), str(match.get('entrezgene'))])
else:
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), match.get('entrezgene')])
print('Queries with partial matching results found:',
len(partial_match_results))
if display_unmatched_queries:
for entry in partial_match_results:
print(entry)
# Convert matched data list into data frame table
match_table = pd.DataFrame(data=matched_data, columns=[
'Query', 'Score', 'Symbol', 'EntrezID'])
match_table = match_table.set_index('Query')
# Some genes will be matched in duplicates (due to alias mapping, generally the highest scoring matches will be correct)
# Therefore we remove duplicate mappings to create 1-to-1 mappings for query to genes.
duplicate_matched_genes = []
for gene in matched_genes:
if type(match_table.loc[gene]) == pd.DataFrame:
duplicate_matched_genes.append(gene)
print()
print(len(duplicate_matched_genes), "Queries with multiple matches found")
# Construct mapping table of genes with only one full result
single_match_genes = [
gene for gene in query_genes if gene not in duplicate_matched_genes]
match_table_single = match_table.loc[single_match_genes]
# Keep matches of queries matched only once if there are duplicate matches for genes
if len(duplicate_matched_genes) > 0:
# Keep maximum scored matches of queries matched more than once
max_score_matches = []
for gene in duplicate_matched_genes:
matched_duplicates = match_table.loc[gene]
max_score = max(matched_duplicates['Score'])
max_score_matches.append(
matched_duplicates[matched_duplicates['Score'] == max_score])
match_table_duplicate_max =
|
pd.concat(max_score_matches)
|
pandas.concat
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
mode = 1
if mode == 0:
df0 = pd.read_csv("results/evolution_clusters_statistics_on_1000.csv")
df1 = pd.read_csv("results/evolution_clusters_statistics_on_500.csv")
df2 =
|
pd.read_csv("results/evolution_clusters_statistics_on_200.csv")
|
pandas.read_csv
|
'''
- This script finds the best lightgbm parameter combinations for an arbitrary list of models, so it's fully scalable.
- It uses a cross validated grid search, with values to be specified at execution, to widen or narrow search space.
- In a variety of different ways, you can specify features to match and exclude from the training dataset.
- You must also specify how many event-snapshots you want to consider (periods).
- You can also specify optimizing and training sampling.
- Contains a decorrelator function to remove highly correlated features and speed up training.
- Regardless of grid search cv-folds specification, does an extra 10-fold cv for the final eval metric.
- Discards useless features by removing those that are not used for any splits, helping make more efficient future ETL pipelines.
- Saves several metadata reports.
- Compares current results to results obtained last month.
'''
# LGBM technical note: it is important that the VARIABLE LIST used to build x_train (values) is always the same (train-deploy), since that is where the column ORDER comes from; in the base DF the order does not matter as long as the variable list is in the correct order.
# Since the variables are selected on the most recent period, the most recent one may have FEWER variables but not MORE, because going back in time some would be missing.
import pandas as pd
import numpy as np
import lightgbm as lgb
import datetime
import math
import itertools
import sys
import pkg_resources
import time
# ---- FUNCTIONS
def tdelta_store(secdelta):
dd = [math.floor(secdelta / (24*60*60)),'d']
hh = [math.floor(secdelta / (60*60) - 24*dd[0]),'h']
mm = [math.floor(secdelta / 60 - 24*60*dd[0] - 60*hh[0]),'m']
ss = [math.floor(secdelta - 24*60*60*dd[0] - 60*60*hh[0] - 60*mm[0]),'s']
tdelta = ''
for x in (dd,hh,mm,ss):
x[0] = str(x[0])
if len(x[0]) == 1:
x[0] = '0' + x[0]
tdelta = tdelta + x[0] + x[1] + '-'
tdelta = tdelta [:-1]
return tdelta
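# Hedged usage sketch (illustrative helper with an assumed name, never called):
# tdelta_store formats an elapsed number of seconds as a zero-padded
# 'dd-hh-mm-ss' string, as derived from the arithmetic above.
def _example_tdelta_store():
    assert tdelta_store(3661) == '00d-01h-01m-01s'   # 1 hour, 1 minute, 1 second
    assert tdelta_store(90061) == '01d-01h-01m-01s'  # 1 day on top of that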
def periods_lister (period,periods):
hora = str(datetime.datetime.time(datetime.datetime.now().replace(microsecond=0)))
print(hora + ' - Executing PERIODS_LISTER , PERIOD = ' + str(period) + ', PERIODS = ' + str(periods) + '..')
periods_list = []
date = datetime.date(int('20'+str(period)[0:2]),int(str(period)[2:4]),1)
for x in range(periods):
if x == 0:
dxdate = date
else:
dxdate = dxdate - datetime.timedelta(days=1)
dxdate = dxdate.replace(day=1)
if len(str(dxdate.month)) == 1:
dxperiod_month = '0' + str(dxdate.month)
else:
dxperiod_month = str(dxdate.month)
periods_list.insert(x, str(dxdate.year)[2:4] + dxperiod_month)
print('Returned PERIODS_LIST = ' + str(periods_list) + '..')
return periods_list
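# Hedged usage sketch (illustrative helper with an assumed name, never called):
# periods are YYMM strings and the list walks backwards month by month,
# including across year boundaries.
def _example_periods_lister():
    assert periods_lister('2003', 3) == ['2003', '2002', '2001']
    assert periods_lister('2001', 2) == ['2001', '1912']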
def vars_remover_m (df,matches):
print ('Trying to drop specific matching vars..')
for x in matches:
try:
df = df.drop(x, axis = 1)
print ('DROPPED: ' + x)
except:
print ('FAILED: ' + x)
return df
def vars_remover_s(df,starting):
for x in starting:
print ('Trying to drop vars starting with: < ' + x + ' > ..')
lista = list(df.filter(regex = '^' + x))
df = df[df.columns.drop(lista)]
print ('Matched exactly ' + str(len(lista)) + ' vars.')
if len(lista) > 5:
print ('Showing first 5 dropped:')
lista = lista[0:5]
else:
print ('Dropped all these vars..')
print("\n".join(lista))
return df
def vars_remover_c_e (df,contain,exclude): # DROP VARS CONTAINING ALL contain (list) STR, BUT NOT CONTAINING ALL exclude (list) STR
    print ('Trying to drop vars containing all strings in ' + ''.join(contain) + ' but not containing strings in ' + ''.join(exclude) + '..')
lists_c = []
for x in contain:
lista = list(df.filter(regex = x))
lists_c.append(lista)
lists_e = []
for x in exclude:
lista = list(df.filter(regex = x))
lists_e.append(lista)
for x in range(len(lists_c)):
if x == range(len(lists_c))[-1]: # if this is the last list, do nothing
continue
if x == 0: # if it is the first one, intersect the first list with the second
lista_c_union = list(set(lists_c[x]).intersection(lists_c[x + 1]))
else: # for the following ones, intersect the existing intersection with the next list
lista_c_union = list(set(lista_c_union).intersection(lists_c[x + 1]))
# ...this could be simplified by setting lista_c_union = lists_c[x] at iteration 0 and intersecting on subsequent iterations...
print ('Vars found CONTAINING:')
for x in lista_c_union:
print (x)
for x in range(len(lists_e)):
if range(len(lists_e))[0] == range(len(lists_e))[-1]:
lista_e_union = lists_e[0]
if x == range(len(lists_e))[-1]:
continue
if x == 0:
lista_e_union = list(set(lists_e[x]).intersection(lists_e[x + 1]))
else:
lista_e_union = list(set(lista_e_union).intersection(lists_e[x + 1]))
print ('Vars found EXCLUDING:')
for x in lista_e_union:
print (x)
lista_exclude_final = list(set(lista_c_union) - set(lista_e_union))
print ('Vars REMOVING:')
for x in lista_exclude_final:
print (x)
df = df[df.columns.drop(lista_exclude_final)]
return df
def decorrelator (df,threshold):
hora = str(datetime.datetime.time(datetime.datetime.now().replace(microsecond=0)))
print('Running DECORRELATOR at: ' + hora + '..')
tmp = df.drop('TARGET', axis=1)
ori = len(tmp.columns)
tmp = tmp.loc[:,tmp.apply(pd.Series.nunique) != 1].corr()
uni = ori - len(tmp.columns)
vars_list = []
for i in tmp.columns:
tmp.loc[i,i] = 0
tmp = tmp.where(np.triu(np.ones(tmp.shape)).astype(bool))
for i in tmp.columns:
fijo = 0
for j in tmp.columns:
if not math.isnan(tmp.loc[j,i]):
if abs(tmp.loc[j,i]) > threshold:
fijo = 1
if fijo==0:
vars_list.append(i)
fin = len(vars_list)
cor = ori - uni - fin
del tmp
hora = str(datetime.datetime.time(datetime.datetime.now().replace(microsecond=0)))
print('Finished DECORRELATOR at: ' + hora + '..')
print('Originally ' + str(ori) + ' vars.')
print('Removed ' + str(ori - fin) + ' vars.')
print('Removed ' + str(uni) + ' vars for NO UNIQUES.')
print('Removed ' + str(cor) + ' vars for ' + str(threshold) + ' + CORRELATION.')
return vars_list
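# Hedged usage sketch (illustrative, not executed here): given a training frame
# df with a 'TARGET' column, decorrelator(df, 0.9) returns the feature names that
# survive after dropping constant columns and any column whose absolute
# correlation with an earlier-listed column exceeds 0.9.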
def target_sampler_ratio_f_t (df,ratio_f_t):
df_t = df[(df["TARGET"]==1)].reset_index(drop = True)
df_f = df[(df["TARGET"]==0)].sample(n = len(df_t) * ratio_f_t).reset_index(drop = True) # toma f segun la cantidad de t
df =
|
pd.concat([df_t,df_f])
|
pandas.concat
|
import os
import fnmatch
import numpy as np
import csv
import sys
import pandas as pd
import re
from sklearn import preprocessing
from scipy import signal
from scipy import stats
def readinputclustering(filename, preprocessingmode):
df = pd.read_csv(filename, header=None)
X = df.iloc[:, 1:].astype(float)
X.fillna(0, inplace=True)
labels = df.iloc[:, 0]
if preprocessingmode == 'log':
# log transform the dataframe because values differ by orders of magnitude
X = np.log(X)
X[~np.isfinite(X)] = 0
labels = df.iloc[:, 0]
else:
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(X)
X = pd.DataFrame(x_scaled)
labels = df.iloc[:, 0]
return X, labels
# reads input for SAX time series discretization
def readMStimedomaintransient(filename):
MS = pd.read_csv(filename, sep =',')
return MS
def crawl_folders(path, extensions):
directories = []
for dirpath, dirnames, files in os.walk(path):
for directory in dirnames:
if directory != 'rawdata' and directory != 'spectrograms' and directory != 'spectrogrampics' and directory != 'results':
p = os.path.join(dirpath, directory)
directories.append(p)
return directories
# find files path, reads csv files only unless specified differently in extensions
def find_files(path, extensions):
# Allow both with ".csv" and without "csv" to be used for extensions
extensions = [e.replace(".", "") for e in extensions]
for dirpath, dirnames, files in os.walk(path):
for extension in extensions:
for f in fnmatch.filter(files, "*.%s" % extension):
p = os.path.join(dirpath, f)
yield (p, extension)
# maybe specify a limit parameter such that optionally
# only part of the spectrogram is examined for now leave whole
# spectrogram
# to make comparisons between m/z series normalization within and between samples is necessary
def read(filename):
spectrogram = pd.read_csv(filename, sep =',')
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(spectrogram)
arr2D = x_scaled
return arr2D
# read in original freq, mass, intensity data raw data from Basem
def readdataframe(filename):
sampleID = os.path.basename(filename)
originaldata = pd.read_table(filename, sep=',', header=0)
colnames = pd.Series(['Index', 'mass', 'freq', 'intensity'])
originaldata.columns = colnames
return originaldata, sampleID
def readdataframecsvrelativefreq(filename, lowerbound, upperbound):
sampleID = os.path.splitext(os.path.basename(filename))[0]
originaldata = pd.read_table(filename, sep=',', header=0)
# sample ID as placeholder variable for Intensity
colnames = pd.Series(['Index', 'freq', 'mass', sampleID])
originaldata.columns = colnames
mask = originaldata['mass'].between(lowerbound, upperbound, inclusive=True)
newdata = originaldata.loc[mask]
del newdata['mass']
del newdata['Index']
return newdata, sampleID
def readdataframecsvrelativefreqminmax(filename, sampling):
sampleID = os.path.splitext(os.path.basename(filename))[0]
originaldata = pd.read_table(filename, sep=',', header=0)
# sample ID as placeholder variable for Intensity
colnames =
|
pd.Series(['Index', 'freq', 'mass', sampleID])
|
pandas.Series
|
import os
import pandas as pd
import yaml
import glob
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
def load_raw_data(cfg, save_raw_df=True, rate_class='all'):
'''
Load all entries for water consumption and combine into a single dataframe
:param cfg: project config
:param save_raw_df: Flag indicating whether to save the accumulated raw dataset
:param rate_class: Rate class to filter raw data by
:return: a Pandas dataframe containing all water consumption records
'''
cat_feats = cfg['DATA']['CATEGORICAL_FEATS']
num_feats = cfg['DATA']['NUMERICAL_FEATS']
bool_feats = cfg['DATA']['BOOLEAN_FEATS']
feat_names = ['CONTRACT_ACCOUNT', 'EFFECTIVE_DATE', 'END_DATE', 'CONSUMPTION'] + num_feats + bool_feats + cat_feats
raw_data_filenames = glob.glob(cfg['PATHS']['RAW_DATA_DIR'] + "/*.csv")
rate_class_str = 'W&S_' + rate_class.upper()
print('Loading raw data from spreadsheets.')
raw_df = pd.DataFrame()
for filename in tqdm(raw_data_filenames):
df = pd.read_csv(filename, encoding='ISO-8859-1', low_memory=False, index_col=False) # Load a water demand CSV
if rate_class_str in df['RATE_CLASS'].unique().tolist():
df = df[df['RATE_CLASS'] == rate_class_str] # Filter by a rate class if desired
for f in df.columns:
if ' ' in f or '"' in f:
df.rename(columns={f: f.replace(' ', '').replace('"', '')}, inplace=True)
for f in feat_names:
if f not in df.columns:
if f in cat_feats:
df[f] = 'Unknown'
else:
df[f] = 0.0
if f in num_feats and df[f].dtype == 'object':
try:
df[f] = pd.to_numeric(df[f], errors='coerce')
df[f].fillna(0, inplace=True)
except Exception as e:
print("Exception ", e, " in file ", filename, " feature ", f)
df = df[feat_names]
df['EFFECTIVE_DATE'] = pd.to_datetime(df['EFFECTIVE_DATE'], errors='coerce')
df['END_DATE'] = pd.to_datetime(df['END_DATE'], errors='coerce')
raw_df = pd.concat([raw_df, df], axis=0, ignore_index=True) # Concatenate next batch of data
shape1 = raw_df.shape
raw_df.drop_duplicates(['CONTRACT_ACCOUNT', 'EFFECTIVE_DATE', 'END_DATE'], keep='last', inplace=True) # Drop duplicate entries appearing in different data slices
print("Deduplication: ", shape1, "-->", raw_df.shape)
print('Consumption total: ', raw_df['CONSUMPTION'].sum())
print(raw_df.shape)
# Replace X's representing true for boolean feats with 1
print('Cleaning data.')
raw_df[bool_feats] = raw_df[bool_feats].replace({'X': 1, 'On': 1, 'Discon': 0, ' ': 0})
raw_df['EST_READ'] = raw_df['EST_READ'].astype('object')
# Fill in missing data
if 'EST_READ' in cat_feats:
raw_df['EST_READ'] = raw_df['EST_READ'].astype('str') + '_' # Force treatment as string
raw_df[['CONSUMPTION'] + num_feats + bool_feats] = raw_df[['CONSUMPTION'] + num_feats + bool_feats].fillna(0)
raw_df[cat_feats] = raw_df[cat_feats].fillna('MISSING')
if save_raw_df:
raw_df.to_csv(cfg['PATHS']['RAW_DATASET'], sep=',', header=True, index_label=False, index=False)
return raw_df
def calculate_ts_data(cfg, raw_df, start_date=None):
'''
Calculates estimates for daily water consumption based on provided historical data. Assumes each client consumes
water at a uniform rate over the billing period. Produces a time series dataset indexed by date.
:param cfg: project config
:param raw_df: A DataFrame containing raw water consumption data
:param start_date: The minimum date at which to create daily estimates
:return: a Pandas dataframe containing estimated daily water consumption
'''
print('Calculating estimates for daily consumption and contextual features.')
raw_df.drop('CONTRACT_ACCOUNT', axis=1, inplace=True)
if start_date is None:
min_date = raw_df['EFFECTIVE_DATE'].min()
else:
min_date = start_date
max_date = raw_df['END_DATE'].max() - timedelta(days=1)
cat_feats = cfg['DATA']['CATEGORICAL_FEATS']
num_feats = cfg['DATA']['NUMERICAL_FEATS']
bool_feats = cfg['DATA']['BOOLEAN_FEATS']
# Determine feature names for preprocessed dataset
date_range = pd.date_range(start=min_date, end=max_date)
daily_df_feat_init = {'Date': date_range, 'Consumption': 0}
for f in num_feats:
daily_df_feat_init[f + '_avg'] = 0.0
daily_df_feat_init[f + '_std'] = 0.0
for f in bool_feats:
daily_df_feat_init[f] = 0.0
for f in cat_feats:
for val in raw_df[f].unique():
daily_df_feat_init[f + '_' + str(val)] = 0.0
daily_df = pd.DataFrame(daily_df_feat_init)
daily_df.set_index('Date', inplace=True)
def daily_consumption(cons, start_date, end_date):
bill_period = (end_date - start_date + timedelta(days=1)).days # Get length of billing period
if bill_period > 0:
return cons / bill_period # Estimate consumption per day over billing period
else:
return 0
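# Worked example (illustrative): a bill of 300 units with EFFECTIVE_DATE
# 2020-01-01 and END_DATE 2020-01-30 covers a 30-day period, so each day in
# that window is attributed 300 / 30 = 10 units of estimated consumption.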
# Populating features for daily prediction
for date in tqdm(date_range):
daily_snapshot = raw_df.loc[(raw_df['EFFECTIVE_DATE'] <= date) & (raw_df['END_DATE'] >= date)]
for f in num_feats:
daily_df.loc[date, f + '_avg'] = daily_snapshot[f].mean()
daily_df.loc[date, f + '_std'] = daily_snapshot[f].std()
for f in bool_feats:
daily_df.loc[date, f] = daily_snapshot[f].mean()
for f in cat_feats:
fractions = daily_snapshot[f].value_counts(normalize=True)
for val, fraction in fractions.items():
daily_df.loc[date, f + '_' + str(val)] = fraction
try:
daily_df.loc[date, 'Consumption'] = (daily_snapshot.apply(lambda row : daily_consumption(row['CONSUMPTION'],
row['EFFECTIVE_DATE'], row['END_DATE']), axis=1)).sum()
except Exception as e:
print(date, e)
daily_df.loc[date, 'Consumption'] = 0.0
# TODO delete once we have no missing data
for missing_range_endpts in cfg['DATA']['MISSING_RANGES']:
missing_range = pd.date_range(pd.to_datetime(missing_range_endpts[0]), pd.to_datetime(missing_range_endpts[1]))
daily_df = daily_df[~daily_df.index.isin(missing_range)] # Remove noise from missing date ranges
return daily_df
def preprocess_ts(cfg=None, save_raw_df=True, save_prepr_df=True, rate_class='all', out_path=None):
'''
Transform raw water demand data into a time series dataset ready to be fed into a model.
:param cfg: project config
:param save_raw_df: Flag indicating whether to save intermediate raw data
:param save_prepr_df: Flag indicating whether to save the preprocessed data
:param rate_class: Rate class to filter by
:param out_path: Path to save updated preprocessed data
'''
run_start = datetime.today()
tqdm.pandas()
if cfg is None:
cfg = yaml.full_load(open("./config.yml", 'r')) # Load project config data
raw_df = load_raw_data(cfg, rate_class=rate_class, save_raw_df=save_raw_df)
preprocessed_df = calculate_ts_data(cfg, raw_df)
preprocessed_df = preprocessed_df[cfg['DATA']['START_TRIM']:-cfg['DATA']['END_TRIM']]
if save_prepr_df:
out_path = cfg['PATHS']['PREPROCESSED_DATA'] if out_path is None else out_path
preprocessed_df.to_csv(out_path, sep=',', header=True)
print("Done. Runtime = ", ((datetime.today() - run_start).seconds / 60), " min")
return preprocessed_df
def preprocess_new_data(cfg, save_raw_df=True, save_prepr_df=True, rate_class='all', out_path=None):
'''
Preprocess a new raw data file and merge it with preexisting preprocessed data.
:param cfg: Project config
:param save_raw_df: Flag indicating whether to save the accumulated raw dataset
:param save_prepr_df: Flag indicating whether to save the combined preprocessed dataset
:param rate_class: Rate class to filter raw data by
:param out_path: Path to save updated preprocessed data
'''
# Load new raw data and remove any rows that appear in old raw data
old_raw_df = pd.read_csv(cfg['PATHS']['RAW_DATASET'], low_memory=False)
old_raw_df['EFFECTIVE_DATE'] = pd.to_datetime(old_raw_df['EFFECTIVE_DATE'], errors='coerce')
min_preprocess_date = old_raw_df['EFFECTIVE_DATE'].max() - timedelta(days=183) # Latest date in old raw dataset minus 1/2 year, to be safe
new_raw_df = load_raw_data(cfg, rate_class=rate_class, save_raw_df=save_raw_df)
if new_raw_df.shape[1] > old_raw_df.shape[1]:
new_raw_df = new_raw_df[old_raw_df.columns] # If additional features added, remove them
# Preprocess new raw data
new_preprocessed_df = calculate_ts_data(cfg, new_raw_df, start_date=min_preprocess_date)
# Load old preprocessed data
old_preprocessed_df =
|
pd.read_csv(cfg['PATHS']['PREPROCESSED_DATA'])
|
pandas.read_csv
|
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confusion_matrix
from functools import partial
import scipy as sp
import matplotlib.pyplot as plt
#from matplotlib_venn import venn2
import lightgbm as lgb
from sklearn import preprocessing
import seaborn as sns
import gc
import psutil
import os
from IPython.display import FileLink
import statistics
import json
import ast
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_validate
import collections
import random
import functools
from sklearn.metrics import roc_curve,auc,accuracy_score,confusion_matrix,f1_score,classification_report
from sklearn.metrics import mean_squared_error
# The metric in question
from sklearn.metrics import cohen_kappa_score
import copy
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from distutils.util import strtobool
import math
from scipy.sparse import csr_matrix, save_npz, load_npz
from typing import Union
from sklearn.decomposition import PCA
#import dask.dataframe as dd
import re
from sklearn.cluster import KMeans
from contextlib import contextmanager
from collections import deque
#import eli5
#from eli5.sklearn import PermutationImportance
import shutil
import array
#import sqlite3
#from tsfresh.utilities.dataframe_functions import roll_time_series
#from tsfresh import extract_features
SEED_NUMBER=2020
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
set_seed(SEED_NUMBER)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.max_rows', 1000)
EMPTY_NUM=-999
# https://github.com/lopuhin/kaggle-imet-2019/blob/master/imet/utils.py#L17
ON_KAGGLE = False#'KAGGLE_URL_BASE'in os.environ
#print(" os.environ :", os.environ)
print("ON_KAGGLE:", ON_KAGGLE)
if not ON_KAGGLE:
#import slackweb
try:
import wandb
from wandb.lightgbm import wandb_callback
except:
print(f"error : cannot import wandb")
else:
import warnings
warnings.simplefilter('ignore')
PROJECT_NAME = "probspace_kiva"
INPUT_DIR = Path("../data/raw")
PROC_DIR = Path("../data/proc")
LOG_DIR = Path("../data/log")
OUTPUT_DIR = Path("../data/submission")
PATH_TO_GRAPH_DIR=Path("../data/graph")
PATH_TO_MODEL_DIR=Path("../data/model")
PATH_TO_UPLOAD_MODEL_PARENT_DIR=Path("../data/model")
PATH_TO_FEATURES_DIR=Path("../data/features")
class Colors:
"""Defining Color Codes to color the text displayed on terminal.
"""
blue = "\033[94m"
green = "\033[92m"
yellow = "\033[93m"
red = "\033[91m"
end = "\033[0m"
def color(string: str, color: Colors = Colors.yellow) -> str:
return f"{color}{string}{Colors.end}"
@contextmanager
def timer2(label: str) -> None:
"""compute the time the code block takes to run.
"""
p = psutil.Process(os.getpid())
start = time.time() # Setup - __enter__
m0 = p.memory_info()[0] / 2. ** 30
print(color(f"{label}: Start at {start}; RAM USAGE AT START {m0}"))
try:
yield # yield to body of `with` statement
finally: # Teardown - __exit__
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
end = time.time()
print(color(f"{label}: End at {end} ({end - start}[s] elapsed); RAM USAGE AT END {m1:.2f}GB ({sign}{delta:.2f}GB)", color=Colors.red))
@contextmanager
def trace(title):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
yield
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr)
def cpu_dict(my_dictionary, text=None):
size = sys.getsizeof(json.dumps(my_dictionary))
#size += sum(map(sys.getsizeof, my_dictionary.values())) + sum(map(sys.getsizeof, my_dictionary.keys()))
print(f"{text} size : {size}")
def cpu_stats(text=None):
#if not ON_KAGGLE:
pid = os.getpid()
py = psutil.Process(pid)
memory_use = py.memory_info()[0] / 2. ** 30
print('{} memory GB:'.format(text) + str(memory_use))#str(np.round(memory_use, 2)))
def reduce_mem_Series(se, verbose=True, categories=False):
numeric2reduce = ["int16", "int32", "int64", "float64"]
col_type = se.dtype
best_type = None
if (categories==True) & (col_type == "object"):
se = se.astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
se = pd.to_numeric(se, downcast=downcast)
best_type = se.dtype.name
if verbose and best_type is not None and best_type != str(col_type):
print(f"Series '{se.index}' converted from {col_type} to {best_type}")
return se
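# Hedged usage sketch (illustrative helper with an assumed name, never called):
# an int64 series of small values is downcast to the smallest sufficient subtype.
def _example_reduce_mem_series():
    se = pd.Series([1, 2, 3], dtype='int64')
    return reduce_mem_Series(se, verbose=False).dtype  # expected: int8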
def reduce_mem_usage(df, verbose=True, categories=False):
# All types that we want to change for "lighter" ones.
# int8 and float16 are not include because we cannot reduce
# those data types.
# float32 is not include because float16 has too low precision.
numeric2reduce = ["int16", "int32", "int64", "float64"]
start_mem = 0
if verbose:
start_mem = df.memory_usage().sum() / 1024**2
#start_mem = memory_usage_mb(df, deep=deep)
for col, col_type in df.dtypes.iteritems():
best_type = None
if (categories==True) & (col_type == "object"):
df[col] = df[col].astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
df[col] = pd.to_numeric(df[col], downcast=downcast)
best_type = df[col].dtype.name
# Log the conversion performed.
if verbose and best_type is not None and best_type != str(col_type):
print(f"Column '{col}' converted from {col_type} to {best_type}")
if verbose:
#end_mem = memory_usage_mb(df, deep=deep)
end_mem = df.memory_usage().sum() / 1024**2
diff_mem = start_mem - end_mem
percent_mem = 100 * diff_mem / start_mem
print(f"Memory usage decreased from"
f" {start_mem:.2f}MB to {end_mem:.2f}MB"
f" ({diff_mem:.2f}MB, {percent_mem:.2f}% reduction)")
return df
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.6f} s')
def normal_sampling(mean, label_k, std=2, under_limit=1e-15):
val = math.exp(-(label_k-mean)**2/(2*std**2))/(math.sqrt(2*math.pi)*std)
if val < under_limit:
val = under_limit
return val
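# Worked example (illustrative): normal_sampling is the Gaussian density
# exp(-(label_k - mean)**2 / (2 * std**2)) / (sqrt(2 * pi) * std), floored at
# under_limit; e.g. normal_sampling(3, 3) ~= 0.1995 and normal_sampling(3, 5) ~= 0.1210.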
def compHist(np_oof, np_y_pred, np_y_true, title_str):
np_list = [np_oof, np_y_true, np_y_pred]
label_list = ["oof", "true", "pred"]
color_list = ['red', 'blue', 'green']
for np_data, label, color in zip(np_list, label_list, color_list):
sns.distplot(
np_data,
#bins=sturges(len(data)),
color=color,
kde=True,
label=label
)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_compHist.png"))
plt.close()
def compPredTarget(y_pred, y_true, index_list, title_str, lm_flag=False):
df_total = pd.DataFrame({"Prediction" : y_pred.flatten(),
"Target" : y_true.flatten(),
"Difference" : y_true.flatten() -y_pred.flatten()
#"type" : np.full(len(y_pred), "oof")
}, index=index_list)
print(df_total)
print("Difference > 0.1 : ", df_total[np.abs(df_total["Difference"]) > 0.1].Difference.count())
#print(df_total[df_total["type"]=="valid_train"].Difference)
fig = plt.figure()
sns.displot(df_total.Difference,bins=10)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_diff_distplot.png"))
plt.close()
#pdb.set_trace()
if lm_flag:
plt.figure()
fig2 = sns.lmplot(x="Target", y="Prediction", data=df_total, palette="Set1")
#fig.set_axis_labels('target', 'pred')
plt.title(title_str)
plt.tight_layout()
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_true_lm.png"))
plt.close()
def dimensionReductionPCA(df, _n_components, prefix="PCA_"):
pca = PCA(n_components=_n_components)
pca.fit(df)
reduced_feature = pca.transform(df)
df_reduced = pd.DataFrame(reduced_feature, columns=[f"{prefix}{x + 1}" for x in range(_n_components)], index=df.index)
print(f"df_reduced:{df_reduced}")
df_tmp = pd.DataFrame(pca.explained_variance_ratio_, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
print(df_tmp)
import matplotlib.ticker as ticker
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list( np.cumsum(pca.explained_variance_ratio_)), "-o")
plt.xlabel("Number of principal components")
plt.ylabel("Cumulative contribution rate")
plt.grid()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA.png")
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
# df_comp = pd.DataFrame(pca.components_, columns=df.columns, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
# print(df_comp)
# plt.figure(figsize=(6, 6))
# for x, y, name in zip(pca.components_[0], pca.components_[1], df.columns):
# plt.text(x, y, name)
# plt.scatter(pca.components_[0], pca.components_[1], alpha=0.8)
# plt.grid()
# plt.xlabel("PC1")
# plt.ylabel("PC2")
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA_scatter.png")
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
return df_reduced
def addNanPos(df, cols_list:list, suffix="nan_pos"):
for col in cols_list:
if df[col].isnull().any():
df["{}_{}".format(col, suffix)] = df[col].map(lambda x: 1 if pd.isna(x) else 0)
return df
def get_feature_importances(X, y, shuffle=False):
# Shuffle the target variable if required
if shuffle:
y = np.random.permutation(y)
# Train the model
clf = RandomForestClassifier(random_state=42)
clf.fit(X, y)
# Build a dataframe containing the feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = X.columns
imp_df["importance"] = clf.feature_importances_
return imp_df.sort_values("importance", ascending=False)
def nullImporcance(df_train_X, df_train_y, th=80, n_runs=100):
# Train the model on the actual target and build a dataframe of feature importances
actual_imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=False)
# Train the model with the target shuffled and build a dataframe of null feature importances
N_RUNS = n_runs
null_imp_df = pd.DataFrame()
for i in range(N_RUNS):
print("run : {}".format(i))
imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=True)
imp_df["run"] = i + 1
null_imp_df = pd.concat([null_imp_df, imp_df])
def display_distributions(actual_imp_df, null_imp_df, feature, path_to_save_dir):
# Get the importance values for a given feature
actual_imp = actual_imp_df.query("feature == '{}'".format(feature))["importance"].mean()
null_imp = null_imp_df.query("feature == '{}'".format(feature))["importance"]
# Visualization
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
a = ax.hist(null_imp, label="Null importances")
ax.vlines(x=actual_imp, ymin=0, ymax=np.max(a[0]), color='r', linewidth=10, label='Real Target')
ax.legend(loc="upper right")
ax.set_title("Importance of {}".format(feature), fontweight='bold')
plt.xlabel("Null Importance Distribution for {}".format(feature))
plt.ylabel("Importance")
plt.show()
path_to_save = os.path.join(str(path_to_save_dir), "null_imp_{}".format(feature))
plt.savefig(path_to_save)
# Display the top 5 features with the highest importance on the actual data
for feature in actual_imp_df["feature"]:
display_distributions(actual_imp_df, null_imp_df, feature, PATH_TO_GRAPH_DIR)
# Set the threshold
THRESHOLD = th
# Collect the features that exceed the threshold
null_features = []
for feature in actual_imp_df["feature"]:
print("Null :: {}".format(feature))
actual_value = actual_imp_df.query("feature=='{}'".format(feature))["importance"].values
null_value = null_imp_df.query("feature=='{}'".format(feature))["importance"].values
percentage = (null_value < actual_value).sum() / null_value.size * 100
print("actual_value: {}, null_value : {}, percentage : {}".format(actual_value, null_value, percentage))
if percentage < THRESHOLD and (100-THRESHOLD) < percentage:
null_features.append(feature)
return null_features
def makeFourArithmeticOperations(df, col1, col2):
new_col = "auto__{}_add_{}".format(col1, col2)
df[new_col] = df[col1] + df[col2]
new_col = "auto__{}_diff_{}".format(col1, col2)
df[new_col] = df[col1] - df[col2]
new_col = "auto__{}_multiply_{}".format(col1, col2)
df[new_col] = df[col1] * df[col2]
new_col = "auto__{}_devide_{}".format(col1, col2)
df[new_col] = df[col1] / df[col2]
return df
def procAgg(df:pd.DataFrame, base_group_col:str, agg_col:str, agg_list:list):
for agg_func in agg_list:
new_col = "auto__{}_{}_agg_by_{}".format(agg_col, agg_func, base_group_col)
map_dict = df.groupby(base_group_col)[agg_col].agg(agg_func)
print(new_col)
print(map_dict)
df[new_col] = df[base_group_col].map(map_dict)
df[new_col] = reduce_mem_Series(df[new_col])
#df = makeFourArithmeticOperations(df, new_col, agg_col)
return df
def aggregationFE(df:pd.DataFrame, base_group_cols:list, agg_cols:list, agg_func_list:list=['count', 'max', 'min', 'sum', 'mean', "nunique", "std", "median", "skew"]):
for b in base_group_cols:
for a in agg_cols:
df = procAgg(df, b, a, agg_func_list)
return df
def makeInteractionColumn(df:pd.DataFrame, inter_cols:list):
print(inter_cols)
for c in inter_cols:
col_name = "inter_" + "_".join(c)
print(col_name)
#df[col_name] = "_"
for i, col in enumerate(c):
print(col)
if i == 0:
df[col_name] = df[col]
else:
#
#print(df[col])
df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#print(df[col_name].unique())
print("****")
return df
def interactionFE(df:pd.DataFrame, cols:list=[], inter_nums:list=[]):
for inter_num in inter_nums:
inter_cols = itertools.combinations(cols, inter_num)
df = makeInteractionColumn(df, inter_cols)
# for c in itertools.combinations(cols, inter_num):
#
# col_name = "inter_" + "_".join(c)
# print(col_name)
# #df[col_name] = "_"
#
# for i, col in enumerate(c):
# print(col)
# if i == 0:
# df[col_name] = df[col]
# else:
# #
# #print(df[col])
# df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#
# print(df[col_name].unique())
return df
def interactionFEbyOne(df:pd.DataFrame, inter_col:str, target_cols:list, inter_nums:list=[1]):
for inter_num in inter_nums:
comb = itertools.combinations(target_cols, inter_num)
for c in comb:
if not inter_col in c:
inter_cols = (inter_col,) + c
print(inter_cols)
df = makeInteractionColumn(df, [inter_cols])
return df
def calcSmoothingParam(num_of_data, k=100, f=100):
param = 1 / (1 + np.exp(-(num_of_data - k)/f))
return param
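# Worked example (illustrative): with the defaults k=100, f=100 the sigmoid
# 1 / (1 + exp(-(n - k) / f)) gives ~0.27 for n=0, 0.5 for n=100 and ~0.88 for
# n=300, so better-populated categories lean more on their own target mean.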
def calcSmoothingTargetMean(df:pd.DataFrame, col_name, target_name):
#print(df[target_name])
all_mean = df[target_name].mean()
#print(all_mean)
#sys.exit()
df_vc = df[col_name].value_counts()
gp_mean_dict = df.groupby(col_name)[target_name].mean()
smooth_target_mean = df_vc.copy()
for key, val in gp_mean_dict.items():
n=df_vc[key]
param = calcSmoothingParam(num_of_data=n)
smooth = param * val + (1-param)*all_mean
smooth_target_mean[key] = smooth
print("label : {}, n = {}, val={}, all = {}, param = {}, final={}".format(key, n, val, all_mean, param, smooth))
del df_vc
gc.collect()
return smooth_target_mean
def targetEncoding(df_train_X, df_train_y, encoding_cols:list, _n_splits=4, smooth_flag=0):
dict_all_train_target_mean = {}
for c in encoding_cols:
# print("Target Encoding : {}".format(c))
# print(f"df_train_X[c] : {df_train_X[c].shape}")
# print(f"df_train_y : {df_train_y.shape}")
#df_data_tmp = pd.DataFrame({c: df_train_X[c], "target":df_train_y})
df_data_tmp =
|
pd.DataFrame(df_train_X[c])
|
pandas.DataFrame
|
from __future__ import annotations
import random
import logging
import warnings
from typing import List, Dict, Tuple, Optional, Iterable, Any
import numpy as np
import pandas as pd
import scipy.sparse
import shapely.geometry
import mobilib.markov
import mobilib.vector
logger = logging.getLogger(__name__)
def components_by_targets(targets: np.ndarray,
strong: bool = True,
return_count: bool = False,
) -> Optional[np.ndarray]:
n = len(targets)
target_neigh = scipy.sparse.csr_matrix(
(
np.ones(n, dtype=bool),
(np.arange(n), targets),
),
shape=(n, n)
)
n_strong, labels = scipy.sparse.csgraph.connected_components(
target_neigh,
directed=True,
connection=('strong' if strong else 'weak'),
return_labels=True
)
out_labels = None if n_strong == n else labels
if return_count:
return n_strong, out_labels
else:
return out_labels
def random_weighted(cum_weights: np.ndarray) -> int:
# argmax gets first occurrence of True (the maximum)
return (cum_weights >= random.random()).argmax()
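# Hedged usage sketch (illustrative, not executed on import): random_weighted
# expects cumulative unit weights, e.g. cum_weights = np.array([0.2, 0.5, 1.0])
# yields index 0 with probability 0.2, index 1 with 0.3 and index 2 with 0.5.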
class Relations:
def __init__(self, matrix: np.ndarray, weights: Optional[np.ndarray] = None):
self.matrix = self._correct_matrix(matrix)
assert len(set(self.matrix.shape)) == 1 # matrix must be square
self.n = self.matrix.shape[0]
self.outsums = self.matrix.sum(axis=1)
self.insums = self.matrix.sum(axis=0)
self.weights = weights if weights is not None else self.outsums
self.totweight = self.weights.sum()
self.unit_weights = self.weights / self.totweight
self.cum_weights = self.unit_weights.cumsum()
self.selfrels = np.diag(self.matrix)
self.outsums_noself = self.outsums - self.selfrels
self.transition_probs = self.matrix / self.outsums[:, np.newaxis]
self.selfprobs = np.diag(self.transition_probs)
rels_w = (self.matrix - np.diag(self.selfrels))
rels_w_sums = rels_w.sum(axis=1)
self.weighting = rels_w * (
self.outsums / np.where(rels_w_sums, rels_w_sums, 1)
)[:, np.newaxis] / self.matrix.sum()
def _correct_matrix(self, matrix: np.ndarray) -> np.ndarray:
# add ones on the diagonal for absorbing states
no_outflows = (matrix.sum(axis=1) == 0)
return matrix + np.diag(no_outflows)
def main_component(self) -> np.ndarray:
n_comps, comp_labels = scipy.sparse.csgraph.connected_components(
self.matrix,
directed=True,
connection='weak',
return_labels=True
)
if n_comps == 1:
return np.ones(len(comp_labels), dtype=bool)
else:
maxcomp = np.bincount(comp_labels).argmax()
return comp_labels == maxcomp
def weighted_sum(self, items: np.ndarray) -> float:
return (items * self.unit_weights).sum()
@classmethod
def from_dataframe(cls,
df: pd.DataFrame,
from_id_col: Optional[str] = None,
to_id_col: Optional[str] = None,
strength_col: Optional[str] = None,
) -> Tuple[Relations, np.ndarray]:
if from_id_col is None:
from_id_col = df.columns[0]
if to_id_col is None:
to_id_col = df.columns[1]
if strength_col is None:
strength_col = df.columns[-1]
all_ids = np.array(list(sorted(set(
list(df[from_id_col].unique())
+ list(df[to_id_col].unique())
))))
n = len(all_ids)
matrix = np.zeros((n, n), dtype=df[strength_col].dtype)
from_ids = np.searchsorted(all_ids, df[from_id_col])
to_ids = np.searchsorted(all_ids, df[to_id_col])
matrix[from_ids, to_ids] = df[strength_col].values
return cls(matrix), all_ids
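# Usage sketch (illustrative long-format interaction table; column names are arbitrary since
# from_dataframe falls back to positional defaults):
#   flows = pd.DataFrame({'from_id': [1, 1, 2], 'to_id': [1, 2, 1], 'strength': [5., 3., 4.]})
#   rels, ids = Relations.from_dataframe(flows)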
class Model:
PARENT_COL = 'parent'
STAGE_COL = 'stage'
INDEX_COL = 'id'
ORG_COL = 'organic'
AUTONOMOUS_COL = 'autonomous'
HIER_COL = 'hier'
BASE_COLS = [PARENT_COL, STAGE_COL]
STAGE_AUTONOM = 'A'
STAGE_NODAL = 'N'
STAGE_ORG = 'O'
STAGES = [STAGE_AUTONOM, STAGE_NODAL, STAGE_ORG]
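    # Stage codes: A = autonomous unit (outside the hierarchy tree), N = nodal unit,
    # O = non-head unit of an organic subsystem bound to a nodal head.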
def __init__(self, df: pd.DataFrame, more_columns: bool = 'error'):
if more_columns == 'ignore':
df = df[self.BASE_COLS].copy()
if set(df.columns) != set(self.BASE_COLS):
raise ValueError(f'superfluous columns in HSSM input: {df.columns}')
self.df = df
def reset_cache(self):
self.df.drop(
[col for col in self.df.columns if col not in self.BASE_COLS],
axis=1, inplace=True
)
@property
def n(self):
return len(self.df)
@property
def index(self):
return self.df.index
@property
def parents(self):
return self.df[self.PARENT_COL]
def parent_is(self, selector: Optional[pd.Series] = None) -> np.ndarray:
parents = self.df[self.PARENT_COL]
if selector is not None:
parents = parents[selector]
return self.df.index.get_indexer(parents)
def hier_is(self):
return np.flatnonzero(self.hier)
def index_of(self, id) -> int:
return self.df.index.get_loc(id)
def indices_of(self, ids: np.ndarray) -> np.ndarray:
return self.df.index.get_indexer(ids)
@property
def organics(self):
if self.ORG_COL not in self.df.columns:
self.df[self.ORG_COL] = (self.df[self.STAGE_COL] == self.STAGE_ORG)
return self.df[self.ORG_COL]
@property
def autonomous(self):
if self.AUTONOMOUS_COL not in self.df.columns:
self.df[self.AUTONOMOUS_COL] = (self.df[self.STAGE_COL] == self.STAGE_AUTONOM)
return self.df[self.AUTONOMOUS_COL]
@property
def hier(self):
if self.HIER_COL not in self.df.columns:
self.df[self.HIER_COL] = ~self.autonomous
return self.df[self.HIER_COL]
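    # org_nodes maps every member of an organic subsystem to the id of the subsystem's
    # head unit (heads are included and map to themselves).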
def org_nodes(self):
org_subs = self.parents[self.organics]
org_parents = org_subs.unique()
return pd.concat((org_subs, pd.Series(org_parents, index=org_parents)))
@property
def tree(self):
return self.df.loc[self.hier,:]
def root_id(self):
return self.df[(self.parents == self.index) & self.hier].index[0]
def root_i(self):
return np.flatnonzero((self.parents == self.index) & self.hier)[0]
def root_ids(self):
root_id = self.root_id()
root_org_subs = (self.parents == root_id) & self.organics
return np.hstack(([root_id], root_org_subs[root_org_subs].index))
def root_is(self):
return self.indices_of(self.root_ids())
def is_root(self, key: Any) -> bool:
row = self.df.loc[key, :]
return row[self.PARENT_COL] == key and row[self.STAGE_COL] == self.STAGE_NODAL
def is_valid(self) -> bool:
try:
self.check()
return True
except AssertionError:
return False
def check(self) -> None:
# conditions:
# all parent ids must be in the index
assert set(self.df[self.PARENT_COL]).issubset(set(self.index))
# only valid stages
assert set(self.df[self.STAGE_COL].unique()).issubset(set(self.STAGES))
# no cycles in the hierarchy tree
assert components_by_targets(self.parent_is()) is None, 'cyclic parental relationship'
# targets of bindings must be nodal
assert (self.df[self.STAGE_COL][self.tree[self.PARENT_COL]] == self.STAGE_NODAL).all(), \
'dependence on autonomous unit or non-head unit of organic subsystem'
# the tree must be a single component
n_tree_comps, comp_labels = components_by_targets(
self.parent_is(self.hier), strong=False, return_count=True
)
assert n_tree_comps == 1, 'non-unified hierarchy tree (actually a forest)'
# there must be a single root
assert len(self.df[(self.parents == self.index) & self.hier]) == 1, 'multi-root hierarchy tree'
@classmethod
def from_arrays(cls,
parents: np.ndarray,
stages: np.ndarray,
index: Optional[np.ndarray] = None,
                    ) -> Model:
return cls(pd.DataFrame({
cls.PARENT_COL: parents,
cls.STAGE_COL: stages,
}, index=index))
@classmethod
def from_flag_arrays(cls,
parents: np.ndarray,
autonomous: np.ndarray,
organics: np.ndarray,
index: Optional[np.ndarray] = None,
                         ) -> Model:
        stages = np.where(
            autonomous, cls.STAGE_AUTONOM,
            np.where(
                organics, cls.STAGE_ORG,
                cls.STAGE_NODAL
            )
        )
return cls.from_arrays(parents, stages, index=index)
def connection_df(self) -> pd.DataFrame:
self.hier, self.organics # to have secondary columns ready
conn_df = (
self.df
.rename_axis(index=self.INDEX_COL)
.reset_index()
.query(f'{self.INDEX_COL} != {self.PARENT_COL} and {self.HIER_COL}')
[[self.INDEX_COL, self.PARENT_COL, self.ORG_COL]]
)
if conn_df[self.ORG_COL].any():
return self._add_organic_connections(conn_df)
else:
return conn_df
def _add_organic_connections(self, conn_df: pd.DataFrame) -> pd.DataFrame:
org_conn_df = conn_df.query(self.ORG_COL)
# For units bound to organic subsystems, add connections to their non-head units.
conn_add_simples_df = self._join_on_parent(
conn_df.query(f'not {self.ORG_COL}'), org_conn_df
)
# For non-head units in organic subsystems, add connections to other non-head units.
conn_add_org_head_df = self._join_on_parent(org_conn_df, org_conn_df).query(
f'{self.INDEX_COL} > {self.PARENT_COL}'
)
# For non-head units in organic subsystems, add connections to their head's parent.
conn_add_head_tgt_df = org_conn_df.merge(
pd.concat([conn_df, conn_add_simples_df]),
left_on=self.PARENT_COL,
right_on=self.INDEX_COL,
suffixes=('_src', '')
)[[f'{self.INDEX_COL}_src', self.PARENT_COL, self.ORG_COL]].rename(
columns={f'{self.INDEX_COL}_src': self.INDEX_COL}
)
return pd.concat([
conn_df,
conn_add_simples_df.assign(**{self.ORG_COL: False}),
conn_add_org_head_df.assign(**{self.ORG_COL: True}),
conn_add_head_tgt_df.assign(**{self.ORG_COL: False}),
], ignore_index=True)
def _join_on_parent(self, df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
indexcol_key_tgt = f'{self.INDEX_COL}_tgt'
return df1.merge(
df2, on=self.PARENT_COL, suffixes=('', '_tgt')
)[[self.INDEX_COL, indexcol_key_tgt, self.ORG_COL]].rename(
columns={indexcol_key_tgt: self.PARENT_COL}
)
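    # tree_keys starts from head_key and iteratively adds every unit whose parent is already
    # collected, yielding the keys of the whole subtree rooted at head_key.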
def tree_keys(self, head_key: Any) -> List[Any]:
parents = self.parents.reset_index(drop=True)
prev_n_keys = 0
tree_keys = {head_key: None}
while len(tree_keys) > prev_n_keys:
prev_n_keys = len(tree_keys)
tree_keys.update({unit_key: None for unit_key in self.parents[self.parents.isin(tree_keys.keys())].index})
return list(tree_keys.keys())
def descendant_keys(self, head_key: Any) -> List[Any]:
return self.tree_keys(head_key)[1:]
def copy(self) -> Model:
return Model(self.df[self.BASE_COLS].copy())
def to_lines(self, points: 'gpd.GeoSeries') -> 'gpd.GeoSeries':
org_node_df = (
pd.DataFrame({'node': self.org_nodes()})
.join(points.rename('from_pt'))
)
org_center_pts = org_node_df.groupby('node')['from_pt'].agg(mobilib.vector.centroid)
org_pt_df = (
org_node_df
.join(org_center_pts.rename('to_pt'), on='node')
[['from_pt', 'to_pt']]
)
if len(org_pt_df) == 0:
org_lines = type(points)()
else:
org_lines = org_pt_df.apply(lambda row: mobilib.vector.straight_line(
row['from_pt'], row['to_pt']
), axis=1)
org_lines.index = pd.MultiIndex.from_arrays((
org_lines.index,
pd.Series(self.STAGE_ORG, index=org_lines.index)
))
is_nodal = self.df[self.STAGE_COL] == self.STAGE_NODAL
        pts_nodal = points[is_nodal].copy()
pts_nodal[org_center_pts.index] = org_center_pts
nodal_lines = (
self.df.loc[is_nodal,[self.PARENT_COL]]
.drop(self.root_id())
.join(pts_nodal.rename('from_pt'))
.join(pts_nodal.rename('to_pt'), on=self.PARENT_COL)
[['from_pt', 'to_pt']]
.apply(lambda row: mobilib.vector.straight_line(row['from_pt'], row['to_pt']), axis=1)
)
nodal_lines.index = pd.MultiIndex.from_arrays((
nodal_lines.index,
pd.Series(self.STAGE_NODAL, index=nodal_lines.index)
))
auto_lines = (
points[self.df[self.STAGE_COL] == self.STAGE_AUTONOM]
.apply(lambda pt: mobilib.vector.straight_line(pt, pt))
)
auto_lines.index = pd.MultiIndex.from_arrays((
auto_lines.index,
pd.Series(self.STAGE_AUTONOM, index=auto_lines.index)
))
return (
type(points)(pd.concat([nodal_lines, org_lines, auto_lines]))
.rename_axis(index=[self.INDEX_COL, self.STAGE_COL + '_line'])
)
class FitnessCriterion:
def evaluate(self, model: Model, rels: Relations) -> float:
raise NotImplementedError
def evaluate_nodes(self, model: Model, rels: Relations) -> pd.Series:
raise NotImplementedError
def fitness_criterion(name: str, **kwargs) -> FitnessCriterion:
return FITNESS_CRITERIA[name](**kwargs)
class MFPTCriterion(FitnessCriterion):
def binding_matrix(self, model: Model, rels: Relations) -> np.ndarray:
raise NotImplementedError
# binding matrix would have to re-include self-interactions
def evaluate(self, model: Model, rels: Relations) -> float:
return self.evaluate_nodes(model, rels).sum()
def evaluate_nodes(self, model: Model, rels: Relations) -> pd.Series:
bmatrix = self.binding_matrix(model, rels)
hier_is = model.hier_is()
sel_bmatrix = bmatrix[hier_is][:,hier_is].tocsr()
transition_matrix = mobilib.markov.transition_matrix(sel_bmatrix)
tree_mfpt = mobilib.markov.mfpt(transition_matrix)
return pd.Series(
(rels.weighting[hier_is][:,hier_is] / tree_mfpt).sum(axis=1),
index=hier_is
)
class DijkstraCriterion(FitnessCriterion):
def evaluate(self, model: Model, rels: Relations) -> float:
return self.evaluate_nodes(model, rels).sum()
def evaluate_nodes(self, model: Model, rels: Relations) -> pd.Series:
bmatrix = self.binding_matrix(model, rels)
hier_is = model.hier_is()
sel_bmatrix = bmatrix[hier_is][:,hier_is].tocsr()
        path_costs = (
            scipy.sparse.csgraph.dijkstra(sel_bmatrix)
            + np.diag(np.ones(sel_bmatrix.shape[0]))
        )
return pd.Series(
(rels.weighting[hier_is][:,hier_is] / path_costs).sum(axis=1),
index=hier_is
)
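    # The binding matrix below is a symmetric cost matrix: each connection is weighted by the
    # self-interaction divided by the flow along it, so relatively strong connections receive
    # low traversal costs (and hence short Dijkstra paths).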
def binding_matrix(self, model: Model, rels: Relations) -> np.ndarray:
diag = np.diag(rels.matrix)
conn_df = model.connection_df()
from_is = model.indices_of(conn_df[model.INDEX_COL])
to_is = model.indices_of(conn_df[model.PARENT_COL])
conn_values_along = rels.matrix[from_is, to_is]
conn_values_cnter = rels.matrix[to_is, from_is]
conn_values = np.where(
conn_df[model.ORG_COL],
(
np.minimum(diag[from_is], diag[to_is])
/ np.minimum(conn_values_along, conn_values_cnter)
),
diag[from_is] / conn_values_along
)
out_shape = (len(diag), len(diag))
return (
scipy.sparse.coo_matrix((conn_values, (from_is, to_is)), shape=out_shape)
+ scipy.sparse.coo_matrix((conn_values, (to_is, from_is)), shape=out_shape)
)
class StageStateCriterion:
def evaluate(self, model: Model, rels: Relations) -> float:
return rels.weighted_sum(self.evaluate_nodes(model, rels)[model.index])
def evaluate_nodes(self, model: Model, rels: Relations) -> pd.Series:
maxflow_k = self.maxflow_k(rels)
conn_df = model.connection_df()
edges = self.edge_matrix(model, conn_df)
return (
pd.Series(self.edge_fits(edges, rels), index=model.index).where(model.hier, 1)
* self.stage_coefs(model, rels, maxflow_k, conn_df.query('organic'))
)
def maxflow_k(self, rels: Relations) -> float:
return self._lstsq1(rels.outsums, (rels.matrix - np.diag(rels.selfrels)).max(axis=1))
def tree_fits(self, model: Model, rels: Relations) -> pd.Series:
edges = self.edge_matrix(model)
return pd.Series(self.edge_fits(edges, rels), index=model.index).where(model.hier, 1)
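    # edge_fits scores how well the tree edges reproduce the observed flows: each unit's
    # relative outflow probabilities are divided by the hop distance in the tree, so flows
    # kept close by the tree contribute more to the fit.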
def edge_fits(self, edges: scipy.sparse.coo_matrix, rels: Relations) -> pd.Series:
# add directed=False if it helps with performance here
path_lengths = scipy.sparse.csgraph.dijkstra(edges, unweighted=True)
all_ids = np.arange(rels.n)
# set diagonal to one to prevent nans
path_lengths[all_ids,all_ids] += 1
tot_outprobs = (1 - rels.selfprobs)[:,np.newaxis]
relative_rels = rels.transition_probs / np.where(tot_outprobs == 0, 1, tot_outprobs)
relative_rels[all_ids,all_ids] = 0
return (relative_rels / path_lengths).sum(axis=1)
def path_lengths(self, model: Model) -> np.ndarray:
return scipy.sparse.csgraph.dijkstra(self.edge_matrix(model), unweighted=True)
def edge_matrix(self, model: Model, conn_df: Optional[pd.DataFrame] = None) -> scipy.sparse.coo_matrix:
if conn_df is None:
conn_df = model.connection_df()
from_is = model.indices_of(conn_df[model.INDEX_COL])
to_is = model.indices_of(conn_df[model.PARENT_COL])
out_shape = (model.n, ) * 2
ones = np.ones(len(from_is), dtype=bool)
return (
scipy.sparse.coo_matrix((ones, (from_is, to_is)), shape=out_shape)
+ scipy.sparse.coo_matrix((ones, (to_is, from_is)), shape=out_shape)
)
def stage_coefs(self,
model: Model,
rels: Relations,
maxflow_k: Optional[float] = None,
org_conn_df: Optional[pd.DataFrame] = None,
) -> pd.Series:
if maxflow_k is None: maxflow_k = self.maxflow_k(rels)
if org_conn_df is None: org_conn_df = model.connection_df().query('organic')
nodality = np.minimum(self.nodality(model, rels, maxflow_k), 1)
organicity = self.organicity(model, rels, maxflow_k, org_conn_df)
return (
nodality
.where(model.hier, 1 - nodality)
.multiply(organicity, fill_value=1)
)
def nodality(self,
model: Model,
rels: Relations,
maxflow_k: Optional[float] = None,
) -> pd.Series:
if maxflow_k is None: maxflow_k = self.maxflow_k(rels)
org_df = model.df[model.organics]
from_org_ids = model.indices_of(org_df.index)
to_org_ids = model.indices_of(org_df[model.PARENT_COL])
# from_org_ids, to_org_ids
nodality_matrix = rels.matrix - np.diag(rels.selfrels)
nodality_matrix[:,to_org_ids] += nodality_matrix[:,from_org_ids]
maxflows = np.maximum(nodality_matrix.max(axis=1), nodality_matrix.max(axis=0))
return pd.Series(maxflows / (maxflow_k * rels.outsums), index=model.index)
def organicity(self,
model: Model,
rels: Relations,
maxflow_k: Optional[float] = None,
org_conn_df: Optional[pd.DataFrame] = None,
) -> pd.Series:
return (
np.minimum(self.symcoh(model, rels, maxflow_k, org_conn_df), 1)
# * self.outsim(model, rels)
)
def symcoh(self,
model: Model,
rels: Relations,
maxflow_k: Optional[float] = None,
org_conn_df: Optional[pd.DataFrame] = None,
) -> pd.Series:
if maxflow_k is None: maxflow_k = self.maxflow_k(rels)
if org_conn_df is None: org_conn_df = model.connection_df().query('organic')
org_link_df = pd.concat([
org_conn_df,
org_conn_df.rename(columns={
model.INDEX_COL: model.PARENT_COL,
model.PARENT_COL: model.INDEX_COL
})
], ignore_index=True)
from_is = model.indices_of(org_link_df[model.INDEX_COL])
to_is = model.indices_of(org_link_df[model.PARENT_COL])
strengths = np.minimum(rels.matrix[from_is, to_is], rels.matrix[to_is, from_is])
# weights = np.sqrt(rels.outsums[from_is] ** 2 + rels.outsums[to_is] ** 2)
weights = np.maximum(rels.outsums[from_is], rels.outsums[to_is])
org_link_df['organicity'] = strengths / (maxflow_k * weights)
return org_link_df.groupby(model.INDEX_COL)['organicity'].min()
def outsim(self,
model: Model,
rels: Relations,
) -> pd.Series:
wt_ser = pd.Series(rels.weights, index=model.df.index)
org_nodes = model.org_nodes()
org_weights = wt_ser[org_nodes.index]
org_node_df = (
pd.DataFrame({'node': org_nodes, 'weight': org_weights})
.join(org_weights.groupby(org_nodes).sum().rename('node_weight'), on='node')
.assign(node_frac=lambda df: df.eval('weight / node_weight'))
.drop(['weight', 'node_weight'], axis=1)
)
outsims =
|
pd.Series(1., index=org_node_df.index)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 22:14:51 2021
@author: Allectus
"""
import os
import re
import copy
import pandas as pd
import tkinter as tk
import plotly.io as pio
import plotly.express as px
from tkinter import filedialog
from lxml import etree
#==============================================================================
def parse_asset_file(xmlfile, taglist, convert=True, collapse_diffs=True):
#Parses X4:Foundations asset xml files
#
#xmlfile: file path to desired input asset file
#taglist: XML asset property tag to collect attributes for
#convert: If True attributes will be converted to floats
xtree = etree.parse(xmlfile)
result = {}
for attr in taglist:
attr_element = xtree.find('//' + str(attr))
if attr_element is not None:
attr_path = xtree.getpath(attr_element)
if collapse_diffs:
attr_path = re.sub(r'/diff/(replace|add)', '', attr_path)
            attr_dict = {str(attr_path) + '/' + str(k):v for k,v in attr_element.attrib.items()}
if convert:
attr_dict = {k:float(v) for k,v in attr_dict.items()}
else:
attr_dict = {}
result.update(attr_dict)
return(result)
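#Example (hypothetical file name and xpaths, shown only to illustrate the return shape):
# parse_asset_file('shield_arg_s_standard_01_mk1_macro.xml', taglist=['recharge'])
# -> {'/macros/macro/properties/recharge/max': 9500.0, '/macros/macro/properties/recharge/rate': 150.0, ...}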
#------------------------------------------------------------------------------
def export_asset_xml_diff(outfilepath, attributes):
#Exports X4:Foundations asset diff xml files
#
#outfilepath: file path to desired output file
#attributes: dict of xpath:value to be exported in the diff file
outstr = '\n'.join(['<?xml version="1.0" encoding="utf-8"?>',
'<diff>',
' <replace sel="' +
'\n <replace sel="'.join([str(xpath)[:str(xpath).rfind('/') + 1] + '@' +
str(xpath)[str(xpath).rfind('/') + 1:] + '">' +
str(round(val,2)) + '</replace>'
for xpath,val in attributes.items()]),
'</diff>'])
os.makedirs(os.path.dirname(outfilepath), exist_ok=True)
with open(outfilepath, 'w') as outfile:
outfile.write(outstr)
return(True)
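#Each attribute produces one replace directive, e.g. (xpath assumed for illustration):
# <replace sel="/macros/macro/properties/recharge/@max">9500.0</replace>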
#------------------------------------------------------------------------------
def parse_resources(resources, asset_path, file_pattern, taglist):
#Collects and parses relevant X4:Foundations asset files based upon input filters
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
    #taglist: tags to extract from the identified input files
loc_resources = copy.deepcopy(resources)
#Find game files
loc_resources['assetdir'] = loc_resources.root.apply(lambda x: os.path.join(x, asset_path))
loc_resources['filelist'] = loc_resources.assetdir.apply(os.listdir)
loc_resources = loc_resources.explode('filelist', ignore_index=True)
#Filter out unwanted files (only keep appropriate xml files)
loc_resources.rename(columns={'filelist':'basefilename'}, inplace=True)
loc_resources['keep'] = loc_resources.basefilename.apply(lambda x: os.path.splitext(x)[1] == '.xml') & loc_resources.basefilename.str.contains(file_pattern)
loc_resources = loc_resources[loc_resources.keep].reset_index(drop=True)
loc_resources = loc_resources.drop('keep', axis=1)
loc_resources['fullpath'] = loc_resources.apply(lambda x: os.path.join(x['assetdir'], x['basefilename']), axis=1)
#Parse the discovered files
loc_resources = pd.concat([loc_resources, pd.DataFrame(list(loc_resources['fullpath'].apply(
lambda x: parse_asset_file(x, taglist=taglist, convert=True, collapse_diffs=True))))], axis=1)
return(loc_resources)
#------------------------------------------------------------------------------
def update_shields(resources, asset_path = 'assets/props/SurfaceElements/macros',
file_pattern=r'^shield.*', taglist = ['recharge']):
    #Identifies and modifies X4: Foundations shield files
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
    #taglist: tags to extract from the identified input files
shield_resources = parse_resources(resources=resources, asset_path=asset_path,
file_pattern=file_pattern, taglist=taglist)
#capture owner/size/type from filename
shield_metadata = shield_resources.basefilename.str.extract(r'(shield_)(.*)(_)(s|m|l|xl)(_)(.*)(_.*)(mk.)(.*)', expand=True)
shield_metadata = shield_metadata.rename(columns={1:'race', 3:'size', 5:'type', 7:'mk'})
shield_resources = pd.concat([shield_resources, shield_metadata[['race', 'size', 'type', 'mk']]], axis=1)
    #colname look up table (to retain xpath in colname so we don't have to reshape to long format)
#gives 'tag_attrib': xpath
modified_cols = {}
cnm_init = {}
for tag in taglist:
colpattern = r'.*(/' + str(tag) + r'/).*'
cnm_init.update({str(tag)+'_'+str(c)[str(c).rfind('/')+1:] :c for c in shield_resources.columns if re.match(colpattern, c)})
vro_results = shield_resources[(shield_resources['source'] == 'vro')].reset_index()
base_results = shield_resources[(shield_resources['source'] == 'base')].reset_index()
modified = pd.merge(vro_results, base_results, how='left', on=['race', 'size', 'type', 'mk'], suffixes=['_vro', '_base'])
#update colname map
cnm = copy.deepcopy(cnm_init)
cnm.update({str(k)+'_base':str(v)+'_base' for k, v in cnm_init.items()})
cnm.update({str(k)+'_vro':str(v)+'_vro' for k, v in cnm_init.items()})
#modify values
max_factors = modified.groupby(['size', 'mk']).apply(lambda x: (x[cnm['recharge_max_vro']] / x[cnm['recharge_max_base']]).mean()).reset_index()
max_factors.rename(columns={0:'max_factor'}, inplace=True)
modified = modified.merge(max_factors, how='left', on=['size', 'mk'])
modified[cnm['recharge_max']] = modified[cnm['recharge_max_base']] * modified['max_factor']
modified.loc[(modified['race'].isin(['kha'])) | (modified[cnm['recharge_max']].isna()), cnm['recharge_max']] = modified[cnm['recharge_max_vro']]
modified_cols.update({'recharge_max': cnm['recharge_max']})
modified[cnm['recharge_delay']] = modified[cnm['recharge_delay_base']] * (3/2)
modified.loc[(modified['race'].isin(['kha'])) | (~modified['size'].isin(['s'])) | (modified[cnm['recharge_delay']].isna()), cnm['recharge_delay']] = modified[cnm['recharge_delay_vro']]
modified_cols.update({'recharge_delay': cnm['recharge_delay']})
recharge_factors = modified.groupby(['size', 'mk']).apply(lambda x: (x[cnm['recharge_rate_vro']] / x[cnm['recharge_rate_base']]).mean()).reset_index()
recharge_factors.rename(columns={0:'recharge_factor'}, inplace=True)
modified = modified.merge(recharge_factors, how='left', on=['size', 'mk'])
modified[cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * modified['recharge_factor']
modified.loc[modified['size'].isin(['s']), cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * 0.9
modified.loc[modified['size'].isin(['m']), cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * modified['recharge_factor'] * 1.25
modified.loc[(modified['race'].isin(['kha'])) | (modified[cnm['recharge_rate']].isna()), cnm['recharge_rate']] = modified[cnm['recharge_rate_vro']]
modified_cols.update({'recharge_rate':cnm['recharge_rate']})
return(modified, modified_cols)
#------------------------------------------------------------------------------
def update_engines(resources, asset_path = 'assets/props/Engines/macros',
file_pattern=r'^engine.*', taglist = ['thrust', 'boost', 'travel']):
    #Identifies and modifies X4: Foundations engine files
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
    #taglist: tags to extract from the identified input files
engine_resources = parse_resources(resources=resources, asset_path=asset_path,
file_pattern=file_pattern, taglist=taglist)
#capture owner/size/type from filename
engine_metadata = engine_resources.basefilename.str.extract(r'(engine_)(.*)(_)(s|m|l|xl)(_)(.*)(_.*)(mk.)(.*)', expand=True)
engine_metadata = engine_metadata.rename(columns={1:'race', 3:'size', 5:'type', 7:'mk'})
engine_resources = pd.concat([engine_resources, engine_metadata[['race', 'size', 'type', 'mk']]], axis=1)
    #colname look up table (to retain xpath in colname so we don't have to reshape to long format)
#gives 'tag_attrib': xpath
modified_cols = {}
cnm_init = {}
for tag in taglist:
colpattern = r'.*(/' + str(tag) + r'/).*'
cnm_init.update({str(tag)+'_'+str(c)[str(c).rfind('/')+1:] :c for c in engine_resources.columns if re.match(colpattern, c)})
#Further filter observations to only those with travel stats (eliminate thrusters etc)
engine_resources = engine_resources[~engine_resources[cnm_init['travel_thrust']].isna()].reset_index(drop=True)
engine_resources['eff_boost_thrust'] = engine_resources[cnm_init['thrust_forward']] * engine_resources[cnm_init['boost_thrust']]
engine_resources['eff_travel_thrust'] = engine_resources[cnm_init['thrust_forward']] * engine_resources[cnm_init['travel_thrust']]
vro_results = engine_resources[(engine_resources['source'] == 'vro')].reset_index()
base_results = engine_resources[(engine_resources['source'] == 'base')].reset_index()
modified =
|
pd.merge(vro_results, base_results, how='left', on=['race', 'size', 'type', 'mk'], suffixes=['_vro', '_base'])
|
pandas.merge
|
import numpy as np
import csv
from datetime import date
import random
from sklearn import linear_model
from sklearn.model_selection import train_test_split, validation_curve
from sklearn.preprocessing import StandardScaler
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv('BOS_CUN_trips1M.csv')
def add_features():
'''
This section is a list of helper functions for further optimizing code
-------------------------------------------------------------------------------------------------------------------------------------------------
'''
def create_dict(id_list):
#creates a dictionary for relevant one-hot categorical vectors
id_dict={}
for i in range(len(id_list)):
id_dict[id_list[i]]=i
return id_dict
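        #e.g. create_dict(['AA', 'BB']) returns {'AA': 0, 'BB': 1}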
def total_days(departure, order):
#calculates the total days between order date and departure date and changes the raw value to categorical data
total_days=departure.sub(order)
        total_days=total_days.rename('total days')
total_days=total_days.apply(lambda x: x.days)
total_days=pd.cut(total_days, bins=12)
return pd.get_dummies(total_days)
def one_hot(features, feature_list, prefixes):
#creates one-hot vectors for all the categorical data
for i in range(len(feature_list)):
if type(feature_list[i])==str:
feature_vector=pd.get_dummies(data[feature_list[i]], prefix=prefixes[i])
else:
feature_vector=pd.get_dummies(feature_list[i], prefix=prefixes[i])
features=pd.concat([features,feature_vector], axis=1)
return features
'''
-------------------------------------------------------------------------------------------------------------------------------------------------
    This initializes many of the labels for the data frames, converts certain dates into datetime, and builds lists that help shorten and optimize the code
------------------------------------------------------------------------------------------------------------------------------------------------------
'''
monthsDepart=['Depart January', 'Depart February', 'Depart March', 'Depart April', 'Depart May', 'Depart June', 'Depart July', 'Depart August', 'Depart September', 'Depart October', 'Depart November', 'Depart December']
monthsReturn=['Return January', 'Return February', 'Return March', 'Return April', 'Return May', 'Return June', 'Return July', 'Return August', 'Return September', 'Return October', 'Return November', 'Return December']
days_of_weekD=['Depart Monday', 'Depart Tuesday', 'Depart Wednesday', 'Depart Thursday', 'Depart Friday', 'Depart Saturday','Depart Sunday']
days_of_weekR=['Return Monday', 'Return Tuesday', 'Return Wednesday', 'Return Thursday', 'Return Friday', 'Return Saturday','Return Sunday']
#creates dictionary of carrier ids
carrier_ids=create_dict(data.majorcarrierid.unique())
#creates dictionary of cabin classes
cabin_ids=create_dict(data.cabinclass.unique())
#creates dictionary of sources
source_ids=create_dict(data.source.unique())
#converting dates to date_time
order_date=
|
pd.to_datetime(data['received_odate'])
|
pandas.to_datetime
|
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually, not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right_iloc)
xs = np.arange(20).reshape(5, 4)
cols = ["jim", "joe", "jolie", "joline"]
df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
# right hand side; permute the indices and multiplpy by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right_iloc = df.copy()
right_iloc["joe"] = [1, 14, 10, 6, 17]
right_iloc["jolie"] = [2, 13, 9, 5, 18]
right_iloc.iloc[1:4, 1:3] *= -2
right_loc = df.copy()
right_loc.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right_loc, right_iloc)
# make frames multi-type & re-run tests
for frame in [df, rhs, right_loc, right_iloc]:
frame["joe"] = frame["joe"].astype("float64")
frame["jolie"] = frame["jolie"].map("@{}".format)
right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
run_tests(df, rhs, right_loc, right_iloc)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:
idx = Index(idx)
ser = Series(np.arange(20), index=idx)
tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
)
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[9] : idx[13] : -1], SLC[:0]
)
def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series):
obj = frame_or_series(np.arange(20), index=_mklbl("A", 20))
with pytest.raises(ValueError, match="slice step cannot be zero"):
indexer_sl(obj)[::0]
def test_loc_setitem_indexing_assignment_dict_already_exists(self):
index = Index([-5, 0, 5], name="z")
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8]}, index=index)
expected = df.copy()
rhs = {"x": 9, "y": 99}
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2, 6], "y": [2.0, 2.0, 8.0]}, index=index)
df.loc[5] = rhs
expected = DataFrame({"x": [1, 2, 9], "y": [2.0, 2.0, 99.0]}, index=index)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_indexing_dtypes_on_empty(self):
# Check that .iloc returns correct dtypes GH9983
df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
df2 = df.iloc[[], :]
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
@pytest.mark.parametrize("size", [5, 999999, 1000000])
def test_loc_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
s = Series(index=range(size), dtype=np.float64)
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(
np.arange(9.0).reshape(3, 3), index=list("abc"), columns=list("ABC")
)
index_df = DataFrame(1, index=list("ab"), columns=list("AB"))
result = df[index_df.notnull()]
expected = DataFrame(
np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),
index=list("abc"),
columns=list("ABC"),
)
tm.assert_frame_equal(result, expected)
def test_no_reference_cycle(self):
df = DataFrame({"a": [0, 1], "b": [2, 3]})
for name in ("loc", "iloc", "at", "iat"):
getattr(df, name)
wr = weakref.ref(df)
del df
assert wr() is None
def test_label_indexing_on_nan(self, nulls_fixture):
# GH 32431
df = Series([1, "{1,2}", 1, nulls_fixture])
vc = df.value_counts(dropna=False)
result1 = vc.loc[nulls_fixture]
result2 = vc[nulls_fixture]
expected = 1
assert result1 == expected
assert result2 == expected
class TestDataframeNoneCoercion:
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_coercion_with_loc(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe.loc[0, ["foo"]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_coercion_with_setitem_and_dataframe(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_none_coercion_loc_and_dataframe(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe.loc[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame(
{
"a": [1, 2, 3],
"b": [1.0, 2.0, 3.0],
"c": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
"d": ["a", "b", "c"],
}
)
start_dataframe.iloc[0] = None
exp = DataFrame(
{
"a": [np.nan, 2, 3],
"b": [np.nan, 2.0, 3.0],
"c": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
"d": [None, "b", "c"],
}
)
tm.assert_frame_equal(start_dataframe, exp)
class TestDatetimelikeCoercion:
def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli):
# dispatching _can_hold_element to underlying DatetimeArray
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti)
values = ser._values
newval = "2018-01-01"
values._validate_setitem_value(newval)
indexer_sli(ser)[0] = newval
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._data is values._data
else:
assert ser._values is values
@pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
def test_setitem_dt64_string_values(self, tz_naive_fixture, indexer_sli, key, box):
# dispatching _can_hold_element to underling DatetimeArray
tz = tz_naive_fixture
if isinstance(key, slice) and indexer_sli is tm.loc:
key = slice(0, 1)
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti)
values = ser._values
newvals = box(["2019-01-01", "2010-01-02"])
values._validate_setitem_value(newvals)
indexer_sli(ser)[key] = newvals
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._data is values._data
else:
assert ser._values is values
@pytest.mark.parametrize("scalar", ["3 Days", offsets.Hour(4)])
def test_setitem_td64_scalar(self, indexer_sli, scalar):
# dispatching _can_hold_element to underling TimedeltaArray
tdi = timedelta_range("1 Day", periods=3)
ser =
|
Series(tdi)
|
pandas.Series
|
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
import collections
import datetime
import csv
from dateutil.relativedelta import relativedelta
import json
class Connection():
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
:param data: the name of data (resq or atalaia)
:type data: str
"""
def __init__(self, nprocess=1, data='resq'):
start = time.time()
# Create log file in the working folder
debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log'
log_file = os.path.join(os.getcwd(), debug)
logging.basicConfig(filename=log_file,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info('Connecting to datamix database!')
# Get absolute path
path = os.path.dirname(__file__)
self.database_ini = os.path.join(path, 'database.ini')
        # Read temporary json file with CZ report names and Angels Awards report names
path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mapping.json')
with open(path, 'r', encoding='utf-8') as json_file:
cz_names_dict = json.load(json_file)
# Set section
datamix = 'datamix-backup'
# datamix = 'datamix'
# Check which data should be exported
if data == 'resq':
# Create empty dictionary
# self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix']
self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand']
# List of dataframe names
self.names = ['resq', 'ivttby', 'thailand']
elif data == 'atalaia':
self.sqls = ['SELECT * from atalaia_mix']
self.names = []
elif data == 'qasc':
self.sqls = ['SELECT * FROM qasc_mix']
self.names = []
elif data == 'africa':
self.sqls = ['SELECT * FROM africa_mix']
self.names = []
# Dictionary initialization - db dataframes
self.dictdb_df = {}
        # Dictionary initialization - prepared dataframes
self.dict_df = {}
if nprocess == 1:
if data == 'resq':
for i in range(0, len(self.names)):
df_name = self.names[i]
self.connect(self.sqls[i], datamix, nprocess, df_name=df_name)
# self.connect(self.sqls[2], datamix, nprocess, df_name='resq_ivttby_mix')
# self.resq_ivttby_mix = self.dictdb_df['resq_ivttby_mix']
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
# if 'resq_ivttby_mix' in self.dictdb_df.keys():
# del self.dictdb_df['resq_ivttby_mix']
for k, v in self.dictdb_df.items():
self.prepare_df(df=v, name=k)
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
# Get all country code in dataframe
self.countries = self._get_countries(df=self.df)
# Get preprocessed data
self.preprocessed_data = self.check_data(df=self.df, nprocess=1)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not pd.isnull(x['HOSPITAL_DATE']) else None, axis=1)
#self.preprocessed_data['HOSPITAL_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_DATE'] + ' ' + self.preprocessed_data['HOSPITAL_TIME'])
except ValueError as error:
logging.error("Error occured when converting hospital date and time into timestamp object - {}.".format(error))
self.preprocessed_data['VISIT_DATE'] = self.preprocessed_data.apply(lambda x: self.fix_date(x['VISIT_DATE'], x['HOSPITAL_DATE']), axis=1)
self.preprocessed_data['VISIT_TIME'] = pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['VISIT_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['VISIT_DATE'], x['VISIT_TIME']) if not pd.isnull(x['VISIT_TIME']) and not pd.isnull(x['VISIT_DATE']) else None, axis=1)
#self.preprocessed_data['VISIT_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['VISIT_DATE'] + ' ' + self.preprocessed_data['VISIT_TIME'])
except ValueError as error:
logging.error("Error occured when converting visit date and time into timestamp object - {}.".format(error))
# Get difference in minutes between hospitalization and last visit
self.preprocessed_data['LAST_SEEN_NORMAL'] = self.preprocessed_data.apply(lambda x: self.time_diff(x['VISIT_TIMESTAMP'], x['HOSPITAL_TIMESTAMP']), axis=1)
self.preprocessed_data['LAST_SEEN_NORMAL'].fillna(0, inplace=True)
                # Create a new column flagging whether the patient had an in-hospital stroke and the recanalization procedures were entered with timestamps
self.preprocessed_data['HOSPITAL_STROKE_IVT_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_ONLY'] == 2) |
(self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['IVT_TBY_REFER'] == 2)),
'HOSPITAL_STROKE_IVT_TIMESTAMPS'] = 1
self.preprocessed_data['HOSPITAL_STROKE_TBY_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['TBY_ONLY'] == 2) |
(self.preprocessed_data['TBY_REFER_LIM'] == 2) |
(self.preprocessed_data['TBY_REFER_ALL'] == 2)),
'HOSPITAL_STROKE_TBY_TIMESTAMPS'] = 1
elif data == 'atalaia':
self.connect(self.sqls[0], datamix, nprocess, df_name='atalaia_mix')
self.atalaiadb_df = self.dictdb_df['atalaia_mix']
#self.atalaia_preprocessed_data = self.prepare_atalaia_df(self.atalaiadb_df)
self.atalaia_preprocessed_data = self.atalaiadb_df.copy()
del self.dictdb_df['atalaia_mix']
elif data == 'qasc':
self.__get_qasc_df(datamix, nprocess)
elif data == 'africa':
self.__get_africa_df(datamix, nprocess)
else:
if data == 'resq':
threads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.connect(self.sqls[i], datamix, i, df_name=df_name))
process.start()
threads.append(process)
# logging.info('The process with id {0} is running.'.format(process))
process = Thread(target=self.connect(self.sqls[2], datamix, 1, df_name='resq_ivttby_mix'))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were exported in {0} minutes.'.format(tdelta))
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
if 'resq_ivttby_mix' in self.dictdb_df.keys():
del self.dictdb_df['resq_ivttby_mix']
                threads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.prepare_df(df=self.dictdb_df[df_name], name=df_name))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were prepared in {0} minutes.'.format(tdelta))
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!.".format(self.names[i]))
subject_ids = self.df['Subject ID'].tolist()
duplicates = [item for item, count in collections.Counter(subject_ids).items() if count > 1]
for i in duplicates:
duplicates_rows = self.df[(self.df['Subject ID'] == i) & (~pd.isnull(self.df['crf_parent_name']))]
set_tmp = set(duplicates_rows['Protocol ID'])
if len(set_tmp) == 1:
crfs = duplicates_rows['crf_parent_name'].tolist()
#print(duplicates_rows[['Subject ID', 'Protocol ID']])
for i in crfs:
if 'RESQV12' in i:
keep_crf = i
if 'RESQV20' in i:
keep_crf = i
if 'IVT_TBY' in i and 'DEVCZ10' not in i:
keep_crf = i
index = duplicates_rows.index[duplicates_rows['crf_parent_name'] != keep_crf].tolist()
self.df.drop(index, inplace=True)
#print(duplicates_rows['crf_parent_name'])
#print("Keep form: {0}, deleted row: {1}".format(keep_crf, index))
                # Get all country codes in the dataframe
self.countries = self._get_countries(df=self.df)
                # Call the check_data function
self.preprocessed_data = self.check_data(self.df, nprocess=nprocess)
#self.preprocessed_data = self.check_data(self.df, nprocess=None)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not
|
pd.isnull(x['HOSPITAL_DATE'])
|
pandas.isnull
|
#! /usr/bin/env python
# coding: utf-8
#
"""ForecastGA: Ensembles"""
import pandas as pd
from sklearn.model_selection import train_test_split as tts
from sklearn.decomposition import PCA
from statsmodels.tools.eval_measures import rmse
from tsfresh.utilities.dataframe_functions import impute, roll_time_series
from tsfresh import extract_features
from tsfresh import select_features
import lightgbm as lgb
from forecastga.helpers.logging import get_logger
from forecastga.helpers.data import constant_feature_detect
_LOG = get_logger(__name__)
def ensemble_performance(forecasts):
dict_perf = {}
for col, _ in forecasts.iteritems():
dict_perf[col] = {}
dict_perf[col]["rmse"] = rmse(forecasts["Target"], forecasts[col])
dict_perf[col]["mse"] = dict_perf[col]["rmse"] ** 2
dict_perf[col]["mean"] = forecasts[col].mean()
return pd.DataFrame.from_dict(dict_perf)
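# Hedged usage sketch (the helper below is illustrative and not part of the original module):
# ensemble_performance expects a frame whose columns are per-model forecasts plus a
# "Target" column, and reports rmse/mse/mean for each column.
def _demo_ensemble_performance():
    # toy forecasts; any DatetimeIndex works
    idx = pd.date_range("2021-01-01", periods=4, freq="D")
    forecasts = pd.DataFrame(
        {"Target": [1.0, 2.0, 3.0, 4.0], "model_a": [1.1, 1.9, 3.2, 3.8]}, index=idx
    )
    return ensemble_performance(forecasts)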
def time_feature(df, perd):
if perd in ["MS", "M", "BM", "BMS"]:
df["month"] = df.index.month
elif perd in ["BH", "H"]:
df["hour"] = df.index.hour
elif perd == "B":
df["dayofweek"] = df.index.dayofweek
elif perd == "D":
df["dayofweek"] = df.index.dayofweek
elif perd in ["W", "W-SUN", "W-MON", "W-TUE", "W-WED", "W-THU", "W-FRI", "W-SAT"]:
df["week"] = df.index.week
elif perd in ["Q", "QS", "BQ", "BQS"]:
df["quarter"] = df.index.quarter
elif perd in ["T", "min"]:
df["minute"] = df.index.minute
elif perd == "S":
df["second"] = df.index.second
# elif perd in ["L","ms"]:
# periodocity = 1000
# elif perd in ["U","us"]:
# periodocity = 1000
# elif perd=="N":
# periodocity = 1000
return df
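# Hedged usage sketch (illustrative only): time_feature keys the added calendar column off
# the pandas frequency string, e.g. a month-start ("MS") index gains a "month" column.
def _demo_time_feature():
    idx = pd.date_range("2021-01-01", periods=3, freq="MS")
    df = pd.DataFrame({"Target": [10.0, 11.0, 12.0]}, index=idx)
    return time_feature(df, "MS")  # df["month"] becomes [1, 2, 3]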
def ensemble_lightgbm(forecast_in, forecast_out, pred):
forecast_in_copy = forecast_in.copy()
forecast_in_copy = time_feature(forecast_in_copy, pred)
forecast_in_copy["mean"] = forecast_in_copy.drop(["Target"], axis=1).mean(axis=1)
forecast_train, forecast_test = tts(
forecast_in_copy, train_size=0.5, shuffle=False, stratify=None
)
target = "Target"
d_train = lgb.Dataset(
forecast_train.drop(columns=[target]), label=forecast_train[target]
)
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmsle",
"max_depth": 6,
"learning_rate": 0.1,
"verbose": 0,
"num_threads": 16,
}
model = lgb.train(params, d_train, 100, verbose_eval=1)
ensemble_lgb = pd.DataFrame(index=forecast_test.index)
ensemble_lgb["ensemble_lgb"] = model.predict(forecast_test.drop(columns=[target]))
ensemble_lgb_out = pd.DataFrame(index=forecast_out.index)
ensemble_lgb_out["ensemble_lgb"] = model.predict(forecast_out)
return ensemble_lgb, ensemble_lgb_out
def ensemble_tsfresh(forecast_in, forecast_out, season, perd):
"""
Create rolled time series for ts feature extraction
"""
def tsfresh_run(forecast, season, insample=True, forecast_out=None):
df_roll_prep = forecast.reset_index()
if insample:
df_roll_prep = df_roll_prep.drop(["Target", "Date"], axis=1)
df_roll_prep["id"] = 1
target = forecast["Target"]
else:
df_roll_prep = df_roll_prep.drop(["index"], axis=1)
df_roll_prep["id"] = 1
df_roll = roll_time_series(
df_roll_prep,
column_id="id",
column_sort=None,
column_kind=None,
rolling_direction=1,
max_timeshift=season - 1,
)
counts = df_roll["id"].value_counts()
df_roll_cut = df_roll[df_roll["id"].isin(counts[counts >= season].index)]
# TS feature extraction
concat_df = pd.DataFrame()
concat_df = extract_features(
df_roll_cut.ffill(),
column_id="id",
column_sort="sort",
n_jobs=season,
show_warnings=False,
disable_progressbar=True,
)
if insample:
concat_df = concat_df.dropna(axis=1, how="all")
concat_df.index = (
target[df_roll_cut["id"].value_counts().index]
.sort_index()
.to_frame()
.index
)
concat_df = pd.merge(
target[df_roll_cut["id"].value_counts().index].sort_index().to_frame(),
concat_df,
left_index=True,
right_index=True,
how="left",
)
concat_df_list = constant_feature_detect(data=concat_df, threshold=0.95)
concat_df = concat_df.drop(concat_df_list, axis=1)
else:
forecast_out.index.name = "Date"
concat_df.index = forecast_out.index
concat_df = impute(concat_df)
return concat_df
_LOG.info("LightGBM ensemble have been successfully built")
concat_df_drop_in = tsfresh_run(forecast_in, season, insample=True)
extracted_n_selected = select_features(
concat_df_drop_in.drop("Target", axis=1),
concat_df_drop_in["Target"],
fdr_level=0.01,
n_jobs=12,
) # fdr is the significance level.
forecast_out_add = pd.concat(
(forecast_in.iloc[-season + 1 :, :].drop(["Target"], axis=1), forecast_out),
axis=0,
)
concat_df_drop_out = tsfresh_run(
forecast_out_add, season, insample=False, forecast_out=forecast_out
)
extracted_n_selected_out = concat_df_drop_out[extracted_n_selected.columns]
# Reduce the dimensions of generated time series features
pca2 = PCA(n_components=8)
pca2.fit(extracted_n_selected)
pca2_results_in = pca2.transform(extracted_n_selected)
pca2_results_out = pca2.transform(extracted_n_selected_out)
cols = 0
for i in range(pca2_results_in.shape[1]):
cols = cols + 1
extracted_n_selected["pca_" + str(i)] = pca2_results_in[:, i]
extracted_n_selected_out["pca_" + str(i)] = pca2_results_out[:, i]
df = forecast_in.iloc[season - 1 :, :].copy()
df = time_feature(df, perd)
df["mean"] = df.drop(["Target"], axis=1).mean(axis=1)
df_new = pd.concat(
(df.reset_index(), extracted_n_selected.iloc[:, -cols:].reset_index(drop=True)),
axis=1,
)
df_new = df_new.set_index("Date")
forecast_train, forecast_test = tts(
df_new, train_size=0.5, shuffle=False, stratify=None
)
target = "Target"
d_train = lgb.Dataset(
forecast_train.drop(columns=[target]), label=forecast_train[target]
)
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmsle",
"max_depth": 6,
"learning_rate": 0.1,
"verbose": 0,
"num_threads": 16,
}
model = lgb.train(params, d_train, 100, verbose_eval=1)
ensemble_ts = pd.DataFrame(index=forecast_test.index)
ensemble_ts["ensemble_ts"] = model.predict(forecast_test.drop(columns=[target]))
df_out = forecast_out.copy()
df_out = time_feature(df_out, perd)
df_out["mean"] = df_out.mean(axis=1)
ensemble_ts_out =
|
pd.DataFrame(index=df_out.index)
|
pandas.DataFrame
|
# Features: Word Embeddings
# Models: SVM
import time
import pickle
import json
import argparse
from tqdm import tqdm
import gensim.downloader as api
import gensim
import spacy
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from scorer.main import evaluate
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition, ensemble, tree
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
import sys
import os
sys.path.append('.')
# from thundersvm import *
my_loc = os.path.dirname(__file__)
data_path = os.path.join(my_loc, 'data')
parser = argparse.ArgumentParser(description='Training for Word Embs')
parser.add_argument('--normalize', type=int, default=1,
help='0,1')
parser.add_argument('--bert_type', type=int, default=0,
help='0,1,2,3')
parser.add_argument('--gpu_id', type=int, default=0,
help='0,1,2,3')
args = parser.parse_args()
def get_best_svm_model(feature_vector_train, label, feature_vector_valid, fname, emb_type):
# param_grid = [{'kernel':'linear', 'C': np.logspace(-2, 2, 10), 'gamma': [1]},
# {'kernel':'rbf', 'C': np.logspace(-2, 2, 10),
# 'gamma': np.logspace(-2, 2, 10)}]
param_grid = [{'kernel': 'rbf', 'C': np.logspace(-3, 3, 30),
'gamma': np.logspace(-3, 3, 30)}]
pca_list = [1.0, 0.99, 0.98, 0.97, 0.96, 0.95]
best_acc = 0.0
best_model = 0
best_prec = 0.0
best_pca_nk = 0
temp_xtrain = feature_vector_train
temp_xval = feature_vector_valid
for pca_nk in tqdm(pca_list, desc='pca_list'):
print(pca_nk)
if pca_nk != 1.0:
pca = decomposition.PCA(n_components=pca_nk).fit(temp_xtrain)
feature_vector_train = pca.transform(temp_xtrain)
feature_vector_valid = pca.transform(temp_xval)
for params in tqdm(param_grid, desc='param_grid', leave=False):
for C in tqdm(params['C'], desc='params[C]', leave=False):
for gamma in tqdm(params['gamma'], desc='params[gamma]', leave=False):
# Model with different parameters
model = svm.SVC(
C=C,
gamma=gamma,
kernel=params['kernel'],
random_state=42,
class_weight='balanced',
# gpu_id=args.gpu_id
)
# fit the training dataset on the classifier
model.fit(feature_vector_train, label)
# predict the acc on validation dataset
acc = model.score(feature_vector_valid, val_y)
predicted_distance = model.decision_function(
feature_vector_valid)
results_fpath = my_loc + \
'/results/bert_word_pos_%s_%s_svm_norm%d.tsv' % (
fname, emb_type, args.normalize)
with open(results_fpath, "w") as results_file:
for i, line in valDF.iterrows():
dist = predicted_distance[i]
results_file.write("{}\t{}\t{}\t{}\n".format('covid-19', line['tweet_id'],
dist, "bert_wd_pos"))
_, _, avg_precision, _, _ = evaluate(
'data/dev.tsv', results_fpath)
if round(avg_precision, 4) >= round(best_prec, 4) and round(acc, 2) >= round(best_acc, 2):
best_prec = avg_precision
best_acc = acc
best_model = model
best_pca_nk = pca_nk
return best_acc, best_pca_nk, best_model
def get_tweet_data(tweet_list):
twit_y, twit_id = [], []
for id in tweet_list:
twit_id.append(id)
twit_y.append(tweet_list[id]['worthy'])
tweetDF =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import json
import itertools as it
import pysam
import multiprocessing as mp
import subprocess as sp
import h5py
import pybedtools
from DIGDriver.data_tools import mutation_tools
from DIGDriver.sequence_model import genic_driver_tools
DNA53 = 'NTCGA'
DNA35 = 'NAGCT'
trans = DNA53.maketrans(DNA53, DNA35)
def reverse_complement(seq):
return seq[::-1].translate(trans)
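# Sanity check added for illustration (not in the original file): the reverse complement
# of "ATCG" is "CGAT".
assert reverse_complement("ATCG") == "CGAT"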
def fetch_sequence(fasta, CHROM, START, END, n_up=2, n_down=2):
""" Fetch a sequence expanded by one bp on either end
to allow for trinucleotide counting of all positions
"""
if START == 0:
START = n_up
seq = fasta.fetch(CHROM, START-n_up, END+n_down).upper()
return seq, START-n_up, END+n_down
def mk_context_sequences(n_up=2, n_down=2, collapse=False):
DNA = 'ACGT'
NUC = 'ACGT'
if collapse:
NUC = 'CT'
prod_items = [DNA]*n_up + [NUC] + [DNA]*n_down
keys = [''.join(tup) for tup in it.product(*prod_items)]
return {key: 0 for key in keys}
def seq_to_context(seq, baseix=2, collapse=False):
""" Convert any sequence into
its unique nucleotide context
kwarg baseix: the index of the base around which the context is constructed
"""
if 'N' in seq:
return ''
if collapse:
if seq[baseix] == 'G' or seq[baseix] == 'A':
return reverse_complement(seq)
return seq
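# Illustrative checks added for clarity (not in the original file): with collapse=True a
# context centred on a purine (A/G) is reported via its reverse complement, so every
# context ends up on the pyrimidine (C/T) strand.
assert seq_to_context("AAGAA", baseix=2, collapse=True) == "TTCTT"
assert seq_to_context("AACAA", baseix=2, collapse=True) == "AACAA"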
def type_mutation(REF, ALT, collapse=False):
if collapse:
if REF == 'G' or REF == 'A':
REF = REF.translate(trans)
ALT = ALT.translate(trans)
return "{}>{}".format(REF, ALT)
def count_sequence_context(seq, n_up=2, n_down=2, nuc_dict=None, collapse=False):
""" Count the nucleotides context present in a sequence
"""
if not nuc_dict:
nuc_dict = mk_context_sequences(n_up=n_up, n_down=n_down, collapse=collapse)
for i in range(n_up, len(seq)-n_down):
substr = seq_to_context(seq[i-n_up:i+n_down+1], baseix=n_up, collapse=collapse)
if not substr:
continue
nuc_dict[substr] += 1
return nuc_dict
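# Hedged usage sketch (toy sequence, added for illustration): count the 5-mer contexts
# (2 bases up and down) of a short sequence; only fully defined interior windows are tallied.
def _demo_count_contexts():
    counts = count_sequence_context("AATCGGA", n_up=2, n_down=2)
    return {k: v for k, v in counts.items() if v}  # {'AATCG': 1, 'ATCGG': 1, 'TCGGA': 1}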
def count_contexts_by_regions(f_fasta, chrom_lst, start_lst, end_lst, n_up=2, n_down=2, collapse=False):
""" Sequence context counts within a set of regions
"""
fasta = pysam.FastaFile(f_fasta)
# print(set(chrom_lst), end = " ")
idx_lst = []
dict_lst = []
for CHROM, START, END in zip(chrom_lst, start_lst, end_lst):
seq, _, _ = fetch_sequence(fasta, CHROM, START, END, n_up=n_up, n_down=n_down)
dict_lst.append(count_sequence_context(seq, n_up=n_up, n_down=n_down, collapse=collapse))
idx_lst.append("{}:{}-{}".format(CHROM, START, END))
return
|
pd.DataFrame(dict_lst, index=idx_lst)
|
pandas.DataFrame
|
import pandas as pd
import matplotlib.pyplot as plt
path="/tmp/pycharm_project_355/"
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011.fillna(0)
df_2012_2013.fillna(0)
df_2014_2015.fillna(0)
df_2016_2017.fillna(0)
df_2018_2019.fillna(0)
# def merge_all_data():
# df_1 =pd.concat([df_2010_2011,df_2012_2013],ignore_index=True)
# df_1.to_csv(path+"somedata.csv")
# # df_2 =pd.concat([df_1,df_2014_2015],ignore_index=True)
# # df_3 =pd.concat([df_2,df_2016_2017],ignore_index=True)
# # df_4 =pd.concat([df_3,df_2018_2019],ignore_index=True)
# # df_4.to_csv(path+"allData.csv")
# return df_1
# def group_by_years(column_name):
# df_2010 = df_2010_2011.groupby('siteid')[column_name].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
# df_2011 = df_2010_2011.groupby('siteid')[column_name].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
# df_2012 = df_2012_2013.groupby('siteid')[column_name].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
# df_2013 = df_2012_2013.groupby('siteid')[column_name].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
# df_2014 = df_2014_2015.groupby('siteid')[column_name].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
# df_2015 = df_2014_2015.groupby('siteid')[column_name].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
# df_2016 = df_2016_2017.groupby('siteid')[column_name].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
# df_2017 = df_2016_2017.groupby('siteid')[column_name].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
# df_2018 = df_2018_2019.groupby('siteid')[column_name].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
# df_2019 = df_2018_2019.groupby('siteid')[column_name].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
#
# df1 = pd.merge(df_2010, df_2011, on='siteid')
# df2 = pd.merge(df1, df_2012, on='siteid')
# df3 = pd.merge(df2, df_2013, on='siteid')
# df4 = pd.merge(df3, df_2014, on='siteid')
# df5 = pd.merge(df4, df_2015, on='siteid')
# df6 = pd.merge(df5, df_2016, on='siteid')
# df7 = pd.merge(df6, df_2017, on='siteid')
# df8 = pd.merge(df7, df_2018, on='siteid')
# df_all = pd.merge(df8, df_2019, on='siteid')
#
# return df_all
def group_by_sum(group_by_value,column_name,lambda_val):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[column_name].apply(lambda x: (x== lambda_val ).sum()).reset_index(name=column_name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[column_name].apply(lambda x: (x== lambda_val ).sum()).reset_index(name=column_name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[column_name].apply(lambda x: (x== lambda_val ).sum()).reset_index(name=column_name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[column_name].apply(lambda x: (x== lambda_val ).sum()).reset_index(name=column_name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[column_name].apply(lambda x: (x== lambda_val ).sum()).reset_index(name=column_name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[column_name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[column_name] = df_merge_4[column_name]
return df_new
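# Hedged sketch of the pattern group_by_sum builds on (toy data, not the real STS columns):
# groupby + apply(lambda ...) counts, per site, how many rows equal lambda_val.
def _demo_groupby_pattern():
    toy = pd.DataFrame({'siteid': [1, 1, 2], 'flag': [1, 0, 1]})
    return toy.groupby('siteid')['flag'].apply(lambda x: (x == 1).sum()).reset_index(name='flag_count')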
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def group_by_mean(group_by_value,column_name,name):
df_2010_2011_gb_sum = df_2010_2011.groupby(group_by_value)[column_name].sum().reset_index(name=name)
df_2010_2011_gb_count = df_2010_2011.groupby(group_by_value)[column_name].count().reset_index(name=name)
df_2012_2013_gb_sum = df_2012_2013.groupby(group_by_value)[column_name].sum().reset_index(name=name)
df_2012_2013_gb_count = df_2012_2013.groupby(group_by_value)[column_name].count().reset_index(name=name)
df_2014_2015_gb_sum = df_2014_2015.groupby(group_by_value)[column_name].sum().reset_index(name=name)
df_2014_2015_gb_count = df_2014_2015.groupby(group_by_value)[column_name].count().reset_index(name=name)
df_2016_2017_gb_sum = df_2016_2017.groupby(group_by_value)[column_name].sum().reset_index(name=name)
df_2016_2017_gb_count = df_2016_2017.groupby(group_by_value)[column_name].count().reset_index(name=name)
df_2018_2019_gb_sum = df_2018_2019.groupby(group_by_value)[column_name].sum().reset_index(name=name)
df_2018_2019_gb_count = df_2018_2019.groupby(group_by_value)[column_name].count().reset_index(name=name)
df_merge_1_sum=pd.merge(df_2010_2011_gb_sum,df_2012_2013_gb_sum, on=group_by_value)
df_merge_2_sum=pd.merge(df_merge_1_sum,df_2014_2015_gb_sum, on=group_by_value)
df_merge_3_sum=pd.merge(df_merge_2_sum,df_2016_2017_gb_sum, on=group_by_value)
df_merge_4_sum=pd.merge(df_merge_3_sum,df_2018_2019_gb_sum, on=group_by_value)
df_merge_1_count = pd.merge(df_2010_2011_gb_count, df_2012_2013_gb_count, on=group_by_value)
df_merge_2_count = pd.merge(df_merge_1_count, df_2014_2015_gb_count, on=group_by_value)
df_merge_3_count = pd.merge(df_merge_2_count, df_2016_2017_gb_count, on=group_by_value)
df_merge_4_count =
|
pd.merge(df_merge_3_count, df_2018_2019_gb_count, on=group_by_value)
|
pandas.merge
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def graph_price_sentiment(sentiment, price):
"""
This method graphs the sentiment as a bar graph over the price which is a line graph
:param sentiment: the sentiment dateTime series containing sentiment scores at a given time
:param price: the price dateTime series which has the value for each hour
"""
sentiment =
|
pd.Series(sentiment['Sentiment'], index=sentiment.index)
|
pandas.Series
|
import os, glob
import fnmatch
import pandas as pd
import numpy as np
from astropy.io import fits
import psycopg2
from astropy.table import Table
from datetime import datetime
start = datetime.now()
class SPECData():
def __init__(self, start, end, mode):
self.mode = mode #new, update
self.start_date = start
self.end_date = end
self.save_dir = self.data_dir = os.path.join(os.environ['DATA_DIR'],'detector')
self.spec_file = 'spec_all.fits.gz'
self.conn = psycopg2.connect(host="db.replicator.dev-cattle.stable.spin.nersc.org", port="60042", database="desi_dev", user="desi_reader", password="<PASSWORD>")
def get_exp_df(self):
exp_cols = ['id','data_location','targtra','targtdec','skyra','skydec','deltara','deltadec','reqtime','exptime','flavor','program','lead','focus','airmass',
'mountha','zd','mountaz','domeaz','spectrographs','s2n','transpar','skylevel','zenith','mjd_obs','date_obs','night','moonra','moondec','parallactic','mountel',
'dome','telescope','tower','hexapod','adc','sequence','obstype']
exp_df = pd.read_sql_query(f"SELECT * FROM exposure WHERE date_obs >= '{self.start_date}' AND date_obs < '{self.end_date}'", self.conn)
exp_df_new = exp_df[exp_cols]
self.exp_df_new = exp_df_new.rename(columns={'id':'EXPID'})
self.nights = np.unique(self.exp_df_new['night'])
self.dates = [int(d) for d in self.nights[np.isfinite(self.nights)]]
self.exp_df_base = self.exp_df_new[['EXPID','date_obs']]
def get_spec_df(self):
spec_cols = ['nir_camera_temp', 'nir_camera_humidity','red_camera_temp', 'red_camera_humidity', 'blue_camera_temp','blue_camera_humidity',
'bench_cryo_temp', 'bench_nir_temp','bench_coll_temp', 'ieb_temp', 'time_recorded', 'unit']
spec_df = pd.read_sql_query(f"SELECT * FROM spectrographs_sensors WHERE time_recorded >= '{self.start_date}' AND time_recorded <'{self.end_date}'", self.conn)
spec_df_new = spec_df[spec_cols]
dfs = []
for un in range(10):
df = spec_df_new[spec_df_new.unit == un]
cold = {}
for col in df.columns:
new_col = col + '_' + str(un)
cold[col] = new_col
df = df.rename(columns=cold)
idx = []
for time in self.exp_df_base.date_obs:
ix = np.argmin(np.abs(df['time_recorded_{}'.format(un)] - time))
idx.append(ix)
df = df.iloc[idx]
df = df.reset_index(drop=True)
dfs.append(df)
self.spec_df_final = pd.concat(dfs, axis=1)
self.spec_df_final['EXPID'] = self.exp_df_base['EXPID']
#spec_df.to_csv('spec_by_unit.csv')
spec_mean_df = self.exp_df_base.copy()
for attr in ['nir_camera_temp', 'nir_camera_humidity','red_camera_temp', 'red_camera_humidity', 'blue_camera_temp','blue_camera_humidity', 'bench_cryo_temp', 'bench_nir_temp','bench_coll_temp', 'ieb_temp']:
x = []
for i in range(10):
df = dfs[i]
x.append(df[attr+'_{}'.format(i)])
spec_mean_df[attr+'_mean'] = np.mean(x, axis=0)
self.spec_mean_df_final = spec_mean_df
def get_gfa_df(self):
gfa_cols = ['time_recorded','ccdtemp','hotpeltier','coldpeltier','filter','humid2','humid3','fpga','camerahumid','cameratemp','unit']
gfa_df = pd.read_sql_query(f"SELECT * FROM gfa_telemetry WHERE time_recorded >= '{self.start_date}' AND time_recorded <'{self.end_date}'", self.conn)
gfa_df_new = gfa_df[gfa_cols]
#Rearrange Columns for GFA by number
dfs = []
for un in range(10):
df = gfa_df_new[gfa_df_new.unit == un]
cold = {}
for col in df.columns:
new_col = col + '_' + str(un)
cold[col] = new_col
df = df.rename(columns=cold)
idx = []
for time in self.exp_df_base.date_obs:
ix = np.argmin(np.abs(df['time_recorded_{}'.format(un)] - time))
idx.append(ix)
df = df.iloc[idx]
new_cols = df.columns[1:-1]
df = df[new_cols]
df = df.reset_index(drop=True)
dfs.append(df)
self.gfa_df_final = pd.concat(dfs, axis=1)
self.gfa_df_final['EXPID'] = self.exp_df_base['EXPID']
def get_shack_df(self):
shack_cols = ['room_pressure','space_temp1', 'reheat_temp', 'space_humidity','time_recorded', 'heater_output', 'space_temp2', 'space_temp4','space_temp_avg', 'space_temp3', 'cooling_coil_temp','chilled_water_output']
shack_df = pd.read_sql_query(f"SELECT * FROM shack_wec WHERE time_recorded >= '{self.start_date}' AND time_recorded <'{self.end_date}'", self.conn)
shack_df = shack_df[shack_cols]
idx = []
for time in self.exp_df_base.date_obs:
ix = np.argmin(np.abs(shack_df['time_recorded'] - time))
idx.append(ix)
shack_df_new = shack_df.iloc[idx]
shack_df_new = shack_df_new.rename(columns={'time_recorded':'guider_time_recorded'})
#shack_df_new['EXPID'] = self.exp_df_base['EXPID'] #pd.concat([shack_df_new, self.exp_df_base])
self.shack_df_final = shack_df_new.reset_index(drop=True)
def combine_specs(self, df_, cols_):
dfs = []
for un in range(10):
cols = []
for attr in cols_:
cols.append(attr+'_{}'.format(un))
df = df_[cols]
df['SPECTRO'] = un
new_cols = {}
for col in cols_:
new_cols[col+'_{}'.format(un)] = col
df = df.rename(columns=new_cols)
df = pd.concat([self.exp_df_base, df], axis = 1)
dfs.append(df)
df_final = pd.concat(dfs)
return df_final
def per_amp_columns(self, full_df):
dfs = []
for amp in ['A','B','C','D']:
df = full_df[full_df.AMP == amp][['NIGHT','EXPID','SPECTRO','CAM','READNOISE','BIAS', 'COSMICS_RATE']]
cold = {'NIGHT':'NIGHT','EXPID':'EXPID','SPECTRO':'SPECTRO','CAM':'CAM'}
for col in ['READNOISE','BIAS', 'COSMICS_RATE']:
new_col = col + '_' + amp
cold[col] = new_col
df = df.rename(columns=cold)
df = df.reset_index(drop=True)
dfs.append(df)
full_df.drop(['AMP','READNOISE','BIAS', 'COSMICS_RATE'], axis=1, inplace=True)
full_df.drop_duplicates(subset=['NIGHT', 'EXPID', 'SPECTRO','CAM'], keep='first')
for df in dfs:
full_df =
|
pd.merge(full_df, df, on=['NIGHT','EXPID','SPECTRO','CAM'], how='left')
|
pandas.merge
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def __boxplot(data, name):
plt.figure(figsize=(18, 12))
for i in range(30):
plt.subplot(5, 6, i + 1)
data.iloc[:, i:i + 1].boxplot()
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.savefig(name)
# TODO: density plots drawn this way do not distinguish continuous from discrete distributions
def __density(data, name):
plt.figure(figsize=(18, 12))
for i in range(30):
plt.ylabel(None)
plt.subplot(5, 6, i + 1)
if i == 29:
continue
else:
data.iloc[:, i].plot(kind='kde', label=data.columns[i])
plt.legend(loc='upper right')
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.savefig(name)
def bar_plot():
data = pd.read_csv('HOIP-30_drop.csv')
data.drop(['A-site', 'B-site', 'X-site'], axis=1, inplace=True)
scaler = MinMaxScaler()
npdata = scaler.fit_transform(data)
data = pd.DataFrame(npdata, columns=data.columns)
data.plot(kind='hist', bins=100, subplots=True, figsize=(20, 40))
plt.savefig('data_bar_plot.png')
def raw_data_describe():
data = pd.read_csv('HOIP-30_drop.csv', header=0)
data.drop(['A-site', 'B-site', 'X-site'], axis=1, inplace=True)
data.describe(include=[np.number]).to_csv('eda\\raw_describe.csv')
__boxplot(data, 'eda\\raw_boxplot.png')
data.drop(['X_f-electron'], axis=1, inplace=True)
__density(data, 'eda\\raw_density.png')
def pre_processing_data_describe():
scaler = MinMaxScaler()
data =
|
pd.read_csv('HOIP-30_drop.csv', header=0)
|
pandas.read_csv
|
"""
Module for applying conditional formatting to DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from uuid import uuid1
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
from pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Callable):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler:
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
Data to be styled - either a Series or DataFrame.
precision : int
Precision to round floats to, defaults to pd.options.display.precision.
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A unique identifier to avoid CSS collisions; generated automatically.
caption : str, default None
Caption to attach to the table.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied
.. versionadded:: 1.0.0
Attributes
----------
env : Jinja2 jinja2.Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
DataFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the DataFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = jinja2.PackageLoader("pandas", "io/formats/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
template = env.get_template("html.tpl")
def __init__(
self,
data: FrameOrSeriesUnion,
precision: Optional[int] = None,
table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
uuid: Optional[str] = None,
caption: Optional[str] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
self._todo: List[Tuple[Callable, Tuple, Dict]] = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option("display.precision")
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns: Sequence[int] = []
self.cell_ids = cell_ids
self.na_rep = na_rep
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if self.na_rep is not None and pd.isna(x):
return self.na_rep
elif is_float(x):
display_format = f"{x:.{self.precision}f}"
return display_format
else:
return x
self._display_funcs: DefaultDict[
Tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: default_display_func)
def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(
_shared_docs["to_excel"]
% dict(
axes="index, columns",
klass="Styler",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel="\n .. versionadded:: 0.20",
)
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[Sequence[Label], bool] = True,
index: bool = True,
index_label: Optional[Union[Label, Sequence[Label]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return f"{pair['key']}={pair['value']}"
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle_map = defaultdict(list)
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [
BLANK_CLASS if name is None else INDEX_NAME_CLASS,
f"level{r}",
]
name = BLANK_VALUE if name is None else name
row_es.append(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index,
}
)
if clabels:
for c, value in enumerate(clabels[r]):
cs = [
COL_HEADING_CLASS,
f"level{r}",
f"col{c}",
]
cs.extend(
cell_context.get("col_headings", {}).get(r, {}).get(c, [])
)
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (
self.data.index.names
and com.any_not_none(*self.data.index.names)
and not hidden_index
):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, f"level{c}"]
name = "" if name is None else name
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* (len(clabels[0]) - len(hidden_columns))
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [
ROW_HEADING_CLASS,
f"level{c}",
f"row{r}",
]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid),
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, f"row{r}", f"col{c}"]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {
"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns),
}
# only add an id if the cell has a style
if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == ""):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(tuple(x.split(":")))
else:
props.append(("", ""))
cellstyle_map[tuple(props)].append(f"row{r}_col{c}")
body.append(row_es)
cellstyle = [
{"props": list(props), "selectors": selectors}
for props, selectors in cellstyle_map.items()
]
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ""
if 'class="' in table_attr:
table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=table_attr,
)
def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler":
"""
Format the text display value of cells.
Parameters
----------
formatter : str, callable, dict or None
If ``formatter`` is None, the default formatter is used
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied
.. versionadded:: 1.0.0
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if formatter is None:
assert self._display_funcs.default_factory is not None
formatter = self._display_funcs.default_factory()
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter, na_rep)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
formatter = _maybe_wrap_formatter(formatter, na_rep)
locs = product(*(row_locs, col_locs))
for i, j in locs:
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs) -> str:
"""
Render the built up styles to HTML.
Parameters
----------
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
Returns
-------
rendered : str
The rendered HTML.
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
        # so we have the nested anys below
trimmed = [x for x in d["cellstyle"] if any(any(y) for y in x["props"])]
d["cellstyle"] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
Parameters
----------
attrs : DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.items():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy: bool = False) -> "Styler":
styler = Styler(
self.data,
precision=self.precision,
caption=self.caption,
uuid=self.uuid,
table_styles=self.table_styles,
na_rep=self.na_rep,
)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self) -> "Styler":
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo) -> "Styler":
return self._copy(deepcopy=True)
def clear(self) -> None:
"""
Reset the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
        Relies on the convention that all style functions go through
        .apply or .applymap. They append styles to apply as tuples of
        (application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(
self,
func: Callable[..., "Styler"],
axis: Optional[Axis] = 0,
subset=None,
**kwargs,
) -> "Styler":
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis, result_type="expand", **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
f"Function {repr(func)} must return a DataFrame when "
f"passed to `Styler.apply` with axis=None"
)
if not (
result.index.equals(data.index) and result.columns.equals(data.columns)
):
raise ValueError(
f"Result of {repr(func)} must have identical "
f"index and columns as the input"
)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
raise ValueError(
f"Function {repr(func)} returned the wrong shape.\n"
f"Result has shape: {result.shape}\n"
f"Expected shape: {expected_shape}"
)
self._update_ctx(result)
return self
def apply(
self,
func: Callable[..., "Styler"],
axis: Optional[Axis] = 0,
subset=None,
**kwargs,
) -> "Styler":
"""
Apply a function column-wise, row-wise, or table-wise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append(
(lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs)
)
return self
def _applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
"""
Apply a function elementwise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
)
return self
def where(
self,
cond: Callable,
value: str,
other: Optional[str] = None,
subset=None,
**kwargs,
) -> "Styler":
"""
Apply a function elementwise.
Updates the HTML representation with a style which is
selected in accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean.
value : str
Applied when ``cond`` returns true.
other : str
Applied when ``cond`` returns false.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``cond``.
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ""
return self.applymap(
lambda val: value if cond(val) else other, subset=subset, **kwargs
)
def set_precision(self, precision: int) -> "Styler":
"""
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes: str) -> "Styler":
"""
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
        in addition to the automatic (by default) id.
Parameters
----------
attributes : str
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self) -> List[Tuple[Callable, Tuple, Dict]]:
"""
        Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> "Styler":
"""
Set the styles on the current Styler.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : list
List of style functions.
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid: str) -> "Styler":
"""
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption: str) -> "Styler":
"""
Set the caption on a Styler.
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles) -> "Styler":
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def set_na_rep(self, na_rep: str) -> "Styler":
"""
Set the missing data representation on a Styler.
.. versionadded:: 1.0.0
Parameters
----------
na_rep : str
Returns
-------
self : Styler
"""
self.na_rep = na_rep
return self
def hide_index(self) -> "Styler":
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset) -> "Styler":
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset =
|
_non_reducing_slice(subset)
|
pandas.core.indexing._non_reducing_slice
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) - quotes home - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
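# Hedged usage sketch (the helper name is hypothetical and a live connection to the
# Eastmoney endpoint above is required): grab the spot table and keep a few headline columns.
def _demo_spot_snapshot() -> pd.DataFrame:
    spot_df = stock_zh_a_spot_em()
    return spot_df[["代码", "名称", "最新价", "涨跌幅"]].head()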
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
Eastmoney - stock code to market id mapping
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: mapping from stock code to market id
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
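# Usage sketch (my addition, network access assumed): the mapping is keyed by
# ticker code and valued by the Eastmoney market id (1 for Shanghai, 0 for
# Shenzhen/Beijing) used to build the "secid" parameter below.
# code_map = code_id_map_em()
# print(code_map.get("600000"))  # expected 1 for a Shanghai-listed code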
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney quotes homepage - Shanghai/Shenzhen/Beijing A shares - daily bars
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param adjust: choice of {"qfq": "forward adjusted", "hfq": "backward adjusted", "": "no adjustment"}
:type adjust: str
:return: daily bar data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
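# Usage sketch (my addition, network access and a valid symbol assumed):
# hist_df = stock_zh_a_hist(symbol="000001", period="daily",
#                           start_date="20230101", end_date="20231231",
#                           adjust="qfq")
# print(hist_df.head())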
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday bars
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param adjust: choice of {'', 'qfq', 'hfq'}
:type adjust: str
:return: intraday bar data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.t
|
o_numeric(temp_df["开盘"])
|
pandas.to_numeric
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import math
import itertools
import pandas as pd
import datetime
from fclib.dataset.retail.benchmark_paths import DATA_DIR
import fclib.dataset.retail.benchmark_settings as bs
# Utility functions
def week_of_month(dt):
"""Get the week of the month for the specified date.
Args:
dt (Datetime): Input date
Returns:
wom (Integer): Week of the month of the input date
"""
from math import ceil
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
wom = int(ceil(adjusted_dom / 7.0))
return wom
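# Usage sketch (my addition): 2020-01-01 fell on a Wednesday, so the 15th lands
# in the third calendar week of that month.
# week_of_month(datetime.date(2020, 1, 15))  # -> 3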
def lagged_features(df, lags):
"""Create lagged features based on time series data.
Args:
df (Dataframe): Input time series data sorted by time
lags (List): Lag lengths
Returns:
fea (Dataframe): Lagged features
"""
df_list = []
for lag in lags:
df_shifted = df.shift(lag)
df_shifted.columns = [x + "_lag" + str(lag) for x in df_shifted.columns]
df_list.append(df_shifted)
fea = pd.concat(df_list, axis=1)
return fea
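# Usage sketch (my addition, toy frame): every lag adds a shifted copy of each
# column, suffixed with "_lag<k>"; leading rows are NaN where no history exists.
# toy = pd.DataFrame({"move": [10, 12, 11, 13]})
# lagged_features(toy, lags=[1, 2])  # columns: move_lag1, move_lag2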
def moving_averages(df, start_step, window_size=None):
"""Compute averages of every feature over moving time windows.
Args:
df (Dataframe): Input features as a dataframe
Returns:
fea (Dataframe): Dataframe consisting of the moving averages
"""
if window_size is None:
# Use a large window to compute average over all historical data
window_size = df.shape[0]
fea = df.shift(start_step).rolling(min_periods=1, center=False, window=window_size).mean()
fea.columns = fea.columns + "_mean" + str(window_size)
return fea
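# Usage sketch (my addition, toy frame): with start_step=1 and window_size=2,
# each row is the mean of the two preceding observations.
# toy = pd.DataFrame({"move": [10.0, 12.0, 11.0, 13.0]})
# moving_averages(toy, start_step=1, window_size=2)  # column: move_mean2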
if __name__ == "__main__":
for submission_round in range(1, bs.NUM_ROUNDS + 1):
print("creating features for round {}...".format(submission_round))
# read in data
train_file = os.path.join(DATA_DIR, "train/train_round_{}.csv".format(submission_round))
aux_file = os.path.join(DATA_DIR, "train/aux_round_{}.csv".format(submission_round))
train_df = pd.read_csv(train_file, index_col=False)
aux_df = pd.read_csv(aux_file, index_col=False)
# calculate move
train_df["move"] = train_df["logmove"].apply(lambda x: round(math.exp(x)))
train_df = train_df[["store", "brand", "week", "profit", "move", "logmove"]]
# merge train_df with aux_df
all_df =
|
pd.merge(train_df, aux_df, how="right", on=["store", "brand", "week"])
|
pandas.merge
|
#!/usr/bin/env python
# coding: utf-8
# In[10]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
# In[11]:
df =
|
pd.read_csv('/Users/priya/Downloads/train.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import pandas as pd
from .binary_reader import BinaryReader
from .parsing_util import all_entries_postal, parse_date_num, localize_datenum
class BinaryParser(object):
def __init__(self, binary_reader, na_values):
self.binary_reader = binary_reader
self.na_values = na_values
def parse(self, parameters, is_station=False, coordinate_list=None):
is_postal = all_entries_postal(coordinate_list)
if is_station:
return self._parse_station(parameters[:], coordinate_list)
elif is_postal:
return self._parse_postal(parameters[:], coordinate_list)
else:
return self._parse_latlon(parameters[:], coordinate_list)
def _parse_station(self, parameters, coordinate_list):
parameters.extend(["station_id"])
return self._parse_internal(parameters, 'station', coordinate_list)
def _parse_postal(self, parameters, coordinate_list):
parameters.extend(['postal_code'])
return self._parse_internal(parameters, 'postal', coordinate_list)
def _parse_latlon(self, parameters, coordinate_list):
# add lat, lon in the list of parameters
parameters.extend(["lat", "lon"])
return self._parse_internal(parameters, 'latlon', coordinate_list)
def _parse_internal(self, parameters, parse_type, coordinate_list):
dfs = []
# parse response
num_of_coords = self.binary_reader.get_int() if len(coordinate_list) > 1 else 1
for i in range(num_of_coords):
dict_data = {}
num_of_dates = self.binary_reader.get_int()
for _ in range(num_of_dates):
num_of_params = self.binary_reader.get_int()
date = self.binary_reader.get_double()
if parse_type == 'station':
latlon = [coordinate_list[i]]
elif parse_type == 'postal':
latlon = [coordinate_list[i]]
else:
latlon = coordinate_list[i]
# ensure tuple
latlon = tuple(latlon)
value = self.binary_reader.get_double(num_of_params)
if type(value) is not tuple:
value = (value,)
dict_data[date] = value + latlon
df =
|
pd.DataFrame.from_dict(dict_data, orient="index", columns=parameters)
|
pandas.DataFrame.from_dict
|
# The script for CS 298 AML MP1 Part 2
import pandas as pd
import numpy as np
from skimage.transform import resize
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import RandomForestClassifier
df_tr = pd.read_csv('train.csv')
df_te = pd.read_csv('test.csv', header= None)
df_val = pd.read_csv('val.csv')
# Read the column names
# df.columns
df_tr = df_tr.drop(columns='Unnamed: 0') # Only Training data needs this
# Getting training/validation/test data ready
train_df = df_tr.drop(columns='label')
val_df = df_val.drop(columns='label')
train_labels = df_tr['label'].values
val_labels = df_val['label'].values
train_untouched = train_df.values
val_untouched = val_df.values
test_untouched = df_te.values
def bounding_scaling(pdframe: pd.Series):
temp_pix = pdframe.values.reshape((28, 28))
location = np.where(temp_pix != 0)
# Getting Boundaries
t_top = np.min(location[0])
t_bottom = np.max(location[0])
t_left = np.min(location[1])
t_right = np.max(location[1])
cropped_img = temp_pix[t_top: t_bottom+1, t_left: t_right+1]
return resize(cropped_img, (20, 20), preserve_range=True).reshape((20*20,))
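# Usage sketch (my addition): each row passed in by DataFrame.apply is one
# flattened 28x28 image; the ink bounding box is cropped and stretched to
# 20x20, so the returned vector has 400 entries.
# bounding_scaling(train_df.iloc[0]).shape  # -> (400,)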
def get_prior(labels: np.ndarray):
num_labels = labels.shape[0]
prior_list = []
prior_list.append((np.where(labels == 0)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 1)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 2)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 3)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 4)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 5)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 6)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 7)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 8)[0].shape[0]*1.) / num_labels)
prior_list.append((np.where(labels == 9)[0].shape[0]*1.) / num_labels)
return np.array(prior_list)
train_prior = get_prior(train_labels)
train_df['rescaled'] = train_df.apply(bounding_scaling, axis=1)
val_df['rescaled'] = val_df.apply(bounding_scaling, axis=1)
df_te['rescaled'] = df_te.apply(bounding_scaling, axis=1)
train_scaled = np.vstack(train_df['rescaled'].values)
val_scaled = np.vstack(val_df['rescaled'].values)
test_scaled = np.vstack(df_te['rescaled'].values)
# Normalize the data
# train_features = preprocessing.scale(train_features)
#######################################################
# Begin Naive Bayes Training
g_nb_1 = GaussianNB()
g_nb_2 = GaussianNB()
b_nb_1 = BernoulliNB()
b_nb_2 = BernoulliNB()
# For untouched
g_nb_1.fit(train_untouched, train_labels)
b_nb_1.fit(train_untouched, train_labels)
# For stretched
g_nb_2.fit(train_scaled, train_labels)
b_nb_2.fit(train_scaled, train_labels)
# Validating
print('Gaussian + untouched validation acc:' , g_nb_1.score(val_untouched, val_labels))
print('Gaussian + stretched validation acc:' , g_nb_2.score(val_scaled, val_labels))
print('Bernoulli + untouched validation acc:' , b_nb_1.score(val_untouched, val_labels))
print('Bernoulli + stretched validation acc:' , b_nb_2.score(val_scaled, val_labels))
# Deleting models
del g_nb_1, g_nb_2, b_nb_1, b_nb_2
# Stack training data with validating data
total_untouched = np.vstack((train_untouched, val_untouched))
total_scaled = np.vstack((train_scaled, val_scaled))
total_labels = np.concatenate((train_labels, val_labels))
# re-do Naive Bayes Training
g_nb_1 = GaussianNB()
g_nb_2 = GaussianNB()
b_nb_1 = BernoulliNB()
b_nb_2 = BernoulliNB()
# For untouched
g_nb_1.fit(total_untouched, total_labels)
b_nb_1.fit(total_untouched, total_labels)
# For stretched
g_nb_2.fit(total_scaled, total_labels)
b_nb_2.fit(total_scaled, total_labels)
# Getting prediction and save it into .csv file with correct column label
# @NOTE: I am lazy so I won't do anything fancy
prediction_1 = g_nb_1.predict(test_untouched)
pred_1_df = pd.DataFrame(data={'Label': prediction_1})
pred_1_df.to_csv('cwu72_1.csv', index=True)
pred_1_df = pd.read_csv('cwu72_1.csv')
pred_1_df.columns = ['ImageId', 'Label']
pred_1_df.to_csv('cwu72_1.csv', index=False)
prediction_2 = g_nb_2.predict(test_scaled)
pred_2_df = pd.DataFrame(data={'Label': prediction_2})
pred_2_df.to_csv('cwu72_2.csv', index=True)
pred_2_df = pd.read_csv('cwu72_2.csv')
pred_2_df.columns = ['ImageId', 'Label']
pred_2_df.to_csv('cwu72_2.csv', index=False)
prediction_3 = b_nb_1.predict(test_untouched)
pred_3_df = pd.DataFrame(data={'Label': prediction_3})
pred_3_df.to_csv('cwu72_3.csv', index=True)
pred_3_df = pd.read_csv('cwu72_3.csv')
pred_3_df.columns = ['ImageId', 'Label']
pred_3_df.to_csv('cwu72_3.csv', index=False)
prediction_4 = b_nb_2.predict(test_scaled)
pred_4_df = pd.DataFrame(data={'Label': prediction_4})
pred_4_df.to_csv('cwu72_4.csv', index=True)
pred_4_df =
|
pd.read_csv('cwu72_4.csv')
|
pandas.read_csv
|
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
"""
Plot anomalies for a metric
"""
import logging
import os
import traceback
from time import time
import pandas as pd
from adtk.visualization import plot
import settings
from functions.metrics.get_metric_id_from_base_name import get_metric_id_from_base_name
from functions.database.queries.query_anomalies import get_anomalies
from functions.timeseries.determine_data_frequency import determine_data_frequency
from functions.graphite.get_metrics_timeseries import get_metrics_timeseries
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
# @added 20211125 - Feature #4326: webapp - panorama_plot_anomalies
def panorama_plot_anomalies(base_name, from_timestamp=None, until_timestamp=None):
"""
Create a plot of the metric with its anomalies and return the anomalies dict
and the path and filename
:param base_name: the name of the metric
:param from_timestamp: the from timestamp
:param until_timestamp: the until timestamp
:type base_name: str
:type from_timestamp: int
:type until_timestamp: int
:return: (anomalies_dict, path and file)
:rtype: tuple
"""
function_str = 'panorama_plot_anomalies'
logger.info('%s - base_name: %s, from_timestamp: %s, until_timestamp: %s' % (
function_str, str(base_name), str(from_timestamp),
str(until_timestamp)))
if not until_timestamp:
until_timestamp = int(time())
save_to_file = '%s/panorama_anomalies_plot.%s.%s.%s.png' % (
settings.SKYLINE_TMP_DIR, base_name, str(from_timestamp),
str(until_timestamp))
try:
metric_id = get_metric_id_from_base_name(skyline_app, base_name)
logger.info('%s - %s with metric id:%s' % (
function_str, str(base_name), str(metric_id)))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to determine metric id for %s - %s' % (
function_str, base_name, err))
raise
try:
anomalies_dict = get_anomalies(skyline_app, metric_id, params={'latest': False})
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to determine anomalies for %s - %s' % (
function_str, base_name, err))
raise
if from_timestamp and anomalies_dict:
for anomaly_id in list(anomalies_dict.keys()):
if anomalies_dict[anomaly_id]['anomaly_timestamp'] < from_timestamp:
del anomalies_dict[anomaly_id]
if until_timestamp and anomalies_dict:
for anomaly_id in list(anomalies_dict.keys()):
if anomalies_dict[anomaly_id]['anomaly_timestamp'] > until_timestamp:
del anomalies_dict[anomaly_id]
if os.path.isfile(save_to_file):
return anomalies_dict, save_to_file
if not from_timestamp and anomalies_dict:
first_anomaly_id = list(anomalies_dict.keys())[-1]
first_anomaly_timestamp = anomalies_dict[first_anomaly_id]['anomaly_timestamp']
from_timestamp = first_anomaly_timestamp - (86400 * 7)
logger.info('%s :: the from_timestamp was not passed, calculated from the anomalies_dict as %s' % (
function_str, str(from_timestamp)))
if not from_timestamp and not anomalies_dict:
logger.info('%s :: the from_timestamp was not passed and no anomalies found for %s' % (
function_str, base_name))
from_timestamp = until_timestamp - (86400 * 7)
metrics_functions = {}
metrics_functions[base_name] = {}
metrics_functions[base_name]['functions'] = None
try:
metrics_timeseries = get_metrics_timeseries(skyline_app, metrics_functions, from_timestamp, until_timestamp, log=False)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: get_metrics_timeseries failed - %s' % (
function_str, err))
raise
try:
timeseries = metrics_timeseries[base_name]['timeseries']
# Truncate the first and last timestamp, just in case they are not
# filled buckets
timeseries = timeseries[1:-1]
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get timeseries for %s - %s' % (
function_str, base_name, err))
raise
unaligned_anomaly_timestamps = []
for anomaly_id in list(anomalies_dict.keys()):
unaligned_anomaly_timestamps.append(anomalies_dict[anomaly_id]['anomaly_timestamp'])
# Align anomalies to timeseries resolution
resolution = determine_data_frequency(skyline_app, timeseries, False)
anomaly_timestamps = []
for ts in unaligned_anomaly_timestamps:
anomaly_timestamps.append(int(int(ts) // resolution * resolution))
try:
df =
|
pd.DataFrame(timeseries, columns=['date', 'value'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from validada.slicers import iloc
import validada.functions.raising as ck
import validada.decorators.raising as dc
import datetime as dt
def _add_one(df):
return df + 1
def _safe_add_one(df):
return df.fillna(0.0) + 1
def _noop(df):
return df
def test_is_in_index():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
df = pd.DataFrame(data = list(range(6)), index=dr)
d = dt.date(2015,1,3)
result = ck.has_in_index(df, obj=d)
tm.assert_frame_equal(df, result)
result = dc.has_in_index(obj=d)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
result = ck.has_in_index(df, obj=d, try_ix=True)
result = ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
result = ck.has_in_index(df, obj=d, check_na=True)
def test_is_in_index_raises():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
da = list(range(6))
da[2] = np.nan
df = pd.DataFrame(data = da, index=dr)
d = dt.date(2015,1,12)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d)
with pytest.raises(AssertionError):
dc.has_in_index(obj=d)(_add_one)(df)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, try_ix=True)
ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
d = dt.datetime(2015,1,3)
ck.has_in_index(df, obj=d)
ck.has_in_index(df, obj=d, check_na=False)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, check_na=True)
def test_equal_columns_sum():
df = pd.DataFrame({'A': [1,2,3,4,5], 'B': [1,2,3,4,5]})
result = ck.equal_columns_sum(df)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
def test_equal_columns_sum_raises_slice():
df = pd.DataFrame({'A': [None,2,3,4,0], 'B': [1,2,3,4,None]})
with pytest.raises(AssertionError):
ck.equal_columns_sum(df)
with pytest.raises(AssertionError):
dc.equal_columns_sum()(_add_one)(df)
s = iloc[-3:]
result = ck.equal_columns_sum(df, s)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum(s)(_safe_add_one)(df)
tm.assert_frame_equal(result, _safe_add_one(df))
def test_none_missing():
df = pd.DataFrame(np.random.randn(5, 3))
result = ck.none_missing(df)
tm.assert_frame_equal(df, result)
result = dc.none_missing()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
def test_none_missing_raises():
df = pd.DataFrame(np.random.randn(5, 3))
df.iloc[0, 0] = np.nan
with pytest.raises(AssertionError):
ck.none_missing(df)
with pytest.raises(AssertionError):
dc.none_missing()(_add_one)(df)
def test_monotonic_increasing_lax():
df = pd.DataFrame([1, 2, 2])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=True))
result = dc.is_monotonic(increasing=True)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([1, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True)(_add_one)(df)
df = pd.DataFrame([3, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True)(_add_one)(df)
def test_monotonic_increasing_strict():
df = pd.DataFrame([1, 2, 3])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=True, strict=True))
result = dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([1, 2, 2])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
df = pd.DataFrame([3, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
def test_monotonic_decreasing():
df =
|
pd.DataFrame([2, 2, 1])
|
pandas.DataFrame
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import numpy as np
import pandas as pd
from hydroeval import *
from sklearn.metrics import mean_squared_error
from scipy.stats import ks_2samp
# Compute the root-mean-square error between each method and original run
def rmse(data):
df_rmse = pd.DataFrame()
for i in range(data.shape[1]):
df_rmse = df_rmse.append({'method':data.columns.values[i], 'rmse':np.sqrt(mean_squared_error(
data['Original'], data.iloc[:, i]))}, ignore_index=True, sort=False)
df_rmse = df_rmse.set_index('method')
df_rmse = df_rmse.sort_values(by=['rmse'])
return df_rmse
# Compute the pearson correlation between each method and original run
def pear_corr(data):
df_corr = pd.DataFrame()
for i in range(data.shape[1]):
df_corr = df_corr.append({'method':data.columns.values[i], 'corr':data.iloc[:, i].corr(
data['Original'], method='pearson')}, ignore_index=True, sort=False)
df_corr = df_corr.set_index('method')
df_corr = df_corr.sort_values(by=['corr'], ascending=False)
return df_corr
# Compute the correlation of percentage change between each method and original run
def pctch_corr(data):
df_pctch = data.pct_change()
df_pct_corr = pd.DataFrame()
for i in range(data.shape[1]):
df_pct_corr = df_pct_corr.append({'method':df_pctch.columns.values[i], 'pct_corr':df_pctch.iloc[:, i].corr(
df_pctch['Original'])}, ignore_index=True, sort=False)
df_pct_corr = df_pct_corr.set_index('method')
df_pct_corr = df_pct_corr.sort_values(by=['pct_corr'], ascending=False)
return df_pct_corr
# Compute the Kolmogorov-Smirnov statistic between each method and original run
def kolmo_smir(data):
df_ks = pd.DataFrame()
for i in range(data.shape[1]):
s, p = ks_2samp(data.iloc[:, i], data['Original'])
df_ks = df_ks.append({'method':data.columns.values[i], 'statistic':s, 'pvalue':p},
ignore_index=True, sort=False)
df_ks = df_ks.set_index('method')
df_ks = df_ks.sort_values(by=['pvalue'], ascending=False)
return df_ks
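# Usage sketch (my addition, toy data): each helper expects a DataFrame whose
# columns are methods and which includes an 'Original' reference column.
# toy = pd.DataFrame({'Original': [1.0, 2.0, 3.0], 'MethodA': [1.1, 1.9, 3.2]})
# rmse(toy)       # per-method RMSE against 'Original', sorted ascending
# pear_corr(toy)  # per-method Pearson correlation, sorted descending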
# an evaluation summary table
def sum_table(data, forcing, outputpath):
df_forcing = pd.DataFrame(forcing, columns=['forcing'])
df_forcing = df_forcing.set_index(data.index.values)
df_woforc = data.loc[df_forcing['forcing'] == 0.00]
df_wforc = data.loc[df_forcing['forcing'] != 0.00]
df_table =
|
pd.DataFrame(index=data.columns.values)
|
pandas.DataFrame
|