prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 13:52:29 2019
@author: <NAME>
"""
import numpy as np
import pandas as pd
import math
# Selects most reliable values among the multiple occurences
def curate(data_merged):
smiles_list= data_merged['SMILES'].tolist()
solubility_list= data_merged['Solubility'].tolist()
id_list= data_merged['ID'].tolist()
inchi_list= data_merged['InChI'].tolist()
name_list= data_merged['Name'].tolist()
prediction_list= data_merged['Prediction'].tolist()
# define variables and assign default values
dif_val_list = []
dif_sol_list = []
same_value_counter=0 #same molecules with same values
different_value_counter_2times=0 #same molecules with different values (2 occurences)
different_value_counter_mutiple=0 #same molecules with different values (more than 2 occurences)
ocurrence_count=[-999]*len(id_list)
SD=[-999]*len(id_list)
reliability_group=["-"]*len(id_list)
selected_list=[0]*len(id_list)
# First step: Remove same molecules with same solubility values (change their SMILES into "XXX")
for i in range(0,len(id_list)):
same_value_List=[]
if(smiles_list[i] != "XXX" ):
same_value_List.append(i)
# collect same molecules with in range of 0.01 solubility value
for j in range(i+1,len(id_list)):
if(inchi_list[i]==inchi_list[j]):
if(math.fabs(solubility_list[i]-solubility_list[j])<=0.01):
same_value_List.append(j)
# select the best source according to: 1:name existance 2: size of the dataset (already in ordered according to size)
if(len(same_value_List)>1):
bestId=same_value_List[0]
for sameId in same_value_List:
if((pd.isnull(name_list[bestId]) or name_list[bestId]=="-") and ( not pd.isnull(name_list[sameId]) and name_list[sameId]!="-")):
bestId=sameId
same_value_List.remove(bestId)
for sameId in same_value_List:
smiles_list[sameId]="XXX"
same_value_counter=same_value_counter+1
print ("Total removed same molecule with same value: "+str(same_value_counter))
# Second step: Select the most reliable solubility value among the same molecules (change unselected SMILES into XXX)
for i in range(0,len(id_list)):
same_molecule_List=[]
# collect same molecules with different solubility value
if(smiles_list[i] != "XXX" and selected_list[i]==0):
same_molecule_List.append(i)
for j in range(i+1,len(id_list)):
if(smiles_list[j] != "XXX" and inchi_list[i]==inchi_list[j]):
same_molecule_List.append(j)
# if occurrence count=1 (Group:G1)
if(len(same_molecule_List)==1):
selected_list[i]=1
reliability_group[i]="G1"
SD[i]=0
ocurrence_count[i]=1
# if occurrence count = 2 (closest to reference (prediction) method )
elif(len(same_molecule_List)==2):
# calculate difference betweeen values and prediction (tie breaker)
diff1=math.fabs(solubility_list[same_molecule_List[0]]-prediction_list[same_molecule_List[0]])
diff2=math.fabs(solubility_list[same_molecule_List[1]]-prediction_list[same_molecule_List[1]])
bestId=same_molecule_List[0]
if(diff1<=diff2):
smiles_list[same_molecule_List[1]]="XXX"
different_value_counter_2times=different_value_counter_2times+1
bestId=same_molecule_List[0]
selected_list[bestId]=1
else:
smiles_list[same_molecule_List[0]]="XXX"
different_value_counter_2times=different_value_counter_2times+1
bestId=same_molecule_List[1]
selected_list[bestId]=1
# decide reliability group (if SD>0.5 Group:G2, else Group:G3)
diff=math.fabs(solubility_list[same_molecule_List[0]]-solubility_list[same_molecule_List[1]])
if(diff>1):
reliability_group[bestId]="G2"
else:
reliability_group[bestId]="G3"
# store differences and SD and occurrence count
SD[bestId]=diff/2
ocurrence_count[bestId]=2
# if occurrence count > 2 (closest to mean method )
elif(len(same_molecule_List)>2):
total=0
different_solubility_values_list=[]
for sameId in same_molecule_List:
total=total+solubility_list[sameId]
different_solubility_values_list.append(solubility_list[sameId])
mean=total / len(same_molecule_List)
bestId=same_molecule_List[0]
bestDiff=999
for sameId in same_molecule_List:
diff=math.fabs(solubility_list[sameId]-mean)
if(diff<bestDiff):
bestId=sameId
bestDiff=diff
selected_list[bestId]=1
std=np.std(different_solubility_values_list, axis=0)
SD[bestId]=std
ocurrence_count[bestId]=len(same_molecule_List)
# decide reliability group (if SD>0.5 Group:G4, else Group:G5)
if(std>0.5):
reliability_group[bestId]="G4"
else:
reliability_group[bestId]="G5"
same_molecule_List.remove(bestId)
for sameId in same_molecule_List:
smiles_list[sameId]="XXX"
different_value_counter_mutiple=different_value_counter_mutiple+1
# add reliability information to curated dataset and filter duplicates
data_merged['SD']= | pd.Series(SD) | pandas.Series |
import preprocess
import DimationReduction
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import AgglomerativeClustering
import plotly.express as px
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_samples, silhouette_score, fowlkes_mallows_score
dataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\mushrooms_data.csv"
reducedDataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\reduced_data.csv"
class Clustering:
def __init__(self, method, dimensionReduction="no"):
'''
:param method:
:param dataPath:
'''
self.dimensionReduction = dimensionReduction
self.reducedData = | pd.read_csv(reducedDataPath) | pandas.read_csv |
"""Amazon S3 Read Module (PRIVATE)."""
import concurrent.futures
import datetime
import itertools
import logging
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
from awswrangler.s3._list import path2list
_logger: logging.Logger = logging.getLogger(__name__)
def read_parquet_metadata_internal(
path: Union[str, List[str]],
dtype: Optional[Dict[str, str]],
sampling: float,
dataset: bool,
path_suffix: Optional[str],
use_threads: bool,
boto3_session: Optional[boto3.Session],
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
"""Handle wr.s3.read_parquet_metadata internally."""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if dataset is True:
if isinstance(path, str):
_path: Optional[str] = path if path.endswith("/") else f"{path}/"
paths: List[str] = path2list(path=_path, boto3_session=session, suffix=path_suffix)
else: # pragma: no cover
raise exceptions.InvalidArgumentType("Argument <path> must be str if dataset=True.")
else:
if isinstance(path, str):
_path = None
paths = path2list(path=path, boto3_session=session, suffix=path_suffix)
elif isinstance(path, list):
_path = None
paths = path
else: # pragma: no cover
raise exceptions.InvalidArgumentType(f"Argument path must be str or List[str] instead of {type(path)}.")
schemas: List[Dict[str, str]] = [
_read_parquet_metadata_file(path=x, use_threads=use_threads, boto3_session=session)
for x in _utils.list_sampling(lst=paths, sampling=sampling)
]
_logger.debug("schemas: %s", schemas)
columns_types: Dict[str, str] = {}
for schema in schemas:
for column, _dtype in schema.items():
if (column in columns_types) and (columns_types[column] != _dtype): # pragma: no cover
raise exceptions.InvalidSchemaConvergence(
f"Was detect at least 2 different types in column {column} ({columns_types[column]} and {dtype})."
)
columns_types[column] = _dtype
partitions_types: Optional[Dict[str, str]] = None
partitions_values: Optional[Dict[str, List[str]]] = None
if (dataset is True) and (_path is not None):
partitions_types, partitions_values = _utils.extract_partitions_metadata_from_paths(path=_path, paths=paths)
if dtype:
for k, v in dtype.items():
if columns_types and k in columns_types:
columns_types[k] = v
if partitions_types and k in partitions_types:
partitions_types[k] = v
_logger.debug("columns_types: %s", columns_types)
return columns_types, partitions_types, partitions_values
def _read_text(
parser_func: Callable,
path: Union[str, List[str]],
use_threads: bool = True,
last_modified_begin: Optional[datetime.datetime] = None,
last_modified_end: Optional[datetime.datetime] = None,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
dataset: bool = False,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if "iterator" in pandas_kwargs:
raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if (dataset is True) and (not isinstance(path, str)): # pragma: no cover
raise exceptions.InvalidArgument("The path argument must be a string Amazon S3 prefix if dataset=True.")
if dataset is True:
path_root: str = str(path)
else:
path_root = ""
paths: List[str] = path2list(
path=path, boto3_session=session, last_modified_begin=last_modified_begin, last_modified_end=last_modified_end
)
if len(paths) < 1:
raise exceptions.InvalidArgument("No files Found.")
_logger.debug("paths:\n%s", paths)
if chunksize is not None:
dfs: Iterator[pd.DataFrame] = _read_text_chunksize(
parser_func=parser_func,
paths=paths,
boto3_session=session,
chunksize=chunksize,
pandas_kwargs=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
dataset=dataset,
path_root=path_root,
)
return dfs
if use_threads is False:
df: pd.DataFrame = pd.concat(
objs=[
_read_text_full(
parser_func=parser_func,
path=p,
boto3_session=session,
pandas_kwargs=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
dataset=dataset,
path_root=path_root,
)
for p in paths
],
ignore_index=True,
sort=False,
)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
df = pd.concat(
objs=executor.map(
_read_text_full,
itertools.repeat(parser_func),
itertools.repeat(path_root),
paths,
itertools.repeat(_utils.boto3_to_primitives(boto3_session=session)), # Boto3.Session
itertools.repeat(pandas_kwargs),
itertools.repeat(s3_additional_kwargs),
itertools.repeat(dataset),
),
ignore_index=True,
sort=False,
)
return df
def _read_text_chunksize(
parser_func: Callable,
path_root: str,
paths: List[str],
boto3_session: boto3.Session,
chunksize: int,
pandas_kwargs: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
) -> Iterator[pd.DataFrame]:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
for path in paths:
_logger.debug("path: %s", path)
partitions: Dict[str, Any] = {}
if dataset is True:
partitions = _utils.extract_partitions_from_path(path_root=path_root, path=path)
if pandas_kwargs.get("compression", "infer") == "infer":
pandas_kwargs["compression"] = | infer_compression(path, compression="infer") | pandas.io.common.infer_compression |
# Template for the second internal Kaggle competition
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Path to csv files
path_train = "/home/kacper/Desktop/train.csv"
path_test = "/home/kacper/Desktop/test.csv"
# Read Data
x_train = pd.read_csv(path_train)
y = x_train['class']
x_test = | pd.read_csv(path_test) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 14:54:41 2020
@author: aschauer
"""
import socket
import pandas as pd
from pathlib import Path
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float
# store locally if on my machine (database loses connection to H: drive)
if socket.gethostname() == 'SLFPC954':
CV_RESULTS_DIR = Path(r'D:\\HS_gap_filling_paper\results\cross_validation')
CV_MODELED_SERIES_DIR = CV_RESULTS_DIR / 'modeled_series'
else:
PROJECT_DIR = Path(__file__).parent.parent
CV_RESULTS_DIR = PROJECT_DIR / 'results' / 'cross_validation'
CV_MODELED_SERIES_DIR = CV_RESULTS_DIR / 'modeled_series'
for d in [CV_RESULTS_DIR, CV_MODELED_SERIES_DIR]:
if not d.exists():
d.mkdir(parents=True, exist_ok=True)
DBFILE = CV_RESULTS_DIR / 'cv_scores_database.db'
Base = declarative_base()
class ModeledGap(Base):
__tablename__ = "modeled_gaps"
gap_stn = Column(String, primary_key=True)
fill_method = Column(String, primary_key=True)
station_grid = Column(String, primary_key=True)
gap_type = Column(String, primary_key=True)
gap_winter = Column(Integer, primary_key=True)
gap_start = Column(String, primary_key=True)
gap_end = Column(String, primary_key=True)
train_start = Column(String, primary_key=True)
train_end = Column(String, primary_key=True)
gap_stn_altitude = Column(Integer)
HS_true_file = Column(String) # file reference to pickled Series
HS_pred_file = Column(String) # file reference to pickled Series
HSavg_true = Column(Float)
HSavg_pred = Column(Float)
dHS1_true = Column(Float)
dHS1_pred = Column(Float)
HSmax_true = Column(Float)
HSmax_pred = Column(Float)
RMSE = Column(Float)
RMSE_nonzero = Column(Float)
RMSE_nonzero_true = Column(Float)
RMSE_nonzero_pred = Column(Float)
MAAPE = Column(Float)
MAAPE_nonzero = Column(Float)
MAAPE_nonzero_true = Column(Float)
MAAPE_nonzero_pred = Column(Float)
bias = Column(Float)
HSavg_diff = Column(Float)
HSavg_abs_diff = Column(Float)
HSavg_relative_diff = Column(Float)
HSavg_relative_abs_diff = Column(Float)
dHS1_diff = Column(Float)
dHS1_abs_diff = Column(Float)
dHS1_relative_diff = Column(Float)
dHS1_relative_abs_diff = Column(Float)
HSmax_diff = Column(Float)
HSmax_abs_diff = Column(Float)
HSmax_relative_diff = Column(Float)
HSmax_relative_abs_diff = Column(Float)
r2_score = Column(Float)
r2_score_nonzero = Column(Float)
r2_score_nonzero_true = Column(Float)
r2_score_nonzero_pred = Column(Float)
def create_file_references(self):
gap_base = f"{self.gap_stn}_{self.gap_type}_{self.gap_start}-{self.gap_end}"
model_base = f"{self.fill_method}_{self.station_grid}_{self.train_start}-{self.train_end}"
setattr(self, 'HS_true_file',
str(CV_MODELED_SERIES_DIR / f"_y_true_{gap_base}.pkl"))
setattr(self, 'HS_pred_file',
str(CV_MODELED_SERIES_DIR / f"_y_pred_{gap_base}_{model_base}.pkl"))
engine = sa.create_engine(f'sqlite:///{DBFILE}', echo=False)
Base.metadata.create_all(engine)
def make_session():
session_factory = sa.orm.sessionmaker()
session_factory.configure(bind=engine)
session = session_factory()
return session
def get_cv_results_as_df():
return pd.read_sql('modeled_gaps', engine)
def get_predictions_from_one_gap_as_df(gap_stn,
gap_winter,
fill_methods=None,
station_grids=None):
"""
Query predictions and true data from one gap and gap station and concatenate
result into a single dataframe.
Parameters
----------
gap_stn : str
gap_winter : int
fill_methods : list or tuple, optional
The default is None and queries all methods.
station_grids : list or tuple, optional
The default is None and queris all station grids.
Returns
-------
out_df : TYPE
DESCRIPTION.
"""
query = f"""select *
from modeled_gaps
where gap_winter=?
and gap_stn=?"""
res = | pd.read_sql(query, engine, params=[gap_winter, gap_stn]) | pandas.read_sql |
import csv
import json
import random
import sys
import warnings
from precise_nlp.myio import fill_template
from precise_nlp.preprocess.cspy_ocr import fix_ocr_problems
try:
import pandas as pd
PANDAS = True
except ModuleNotFoundError:
PANDAS = False
import os
try:
import yaml
YAML = True
except ModuleNotFoundError:
YAML = False
from collections import defaultdict, Counter
from jsonschema import validate
from precise_nlp.const.cspy import INDICATION, BOWEL_PREP, EXTENT, NUM_POLYPS
from precise_nlp.const.path import HIGHGRADE_DYSPLASIA, ANY_VILLOUS, VILLOUS, TUBULAR, TUBULOVILLOUS, \
ADENOMA_STATUS, \
ADENOMA_COUNT, LARGE_ADENOMA, ADENOMA_COUNT_ADV, ADENOMA_STATUS_ADV, ADENOMA_DISTAL, ADENOMA_DISTAL_COUNT, \
ADENOMA_PROXIMAL_COUNT, ADENOMA_PROXIMAL, ADENOMA_RECTAL_COUNT, ADENOMA_RECTAL, ADENOMA_UNKNOWN_COUNT, \
ADENOMA_UNKNOWN, PROXIMAL_VILLOUS, DISTAL_VILLOUS, RECTAL_VILLOUS, UNKNOWN_VILLOUS, SIMPLE_HIGHGRADE_DYSPLASIA, \
JAR_ADENOMA_COUNT_ADV, JAR_ADENOMA_DISTAL_COUNT, JAR_ADENOMA_PROXIMAL_COUNT, JAR_ADENOMA_RECTAL_COUNT, \
JAR_ADENOMA_UNKNOWN_COUNT, JAR_SESSILE_SERRATED_ADENOMA_COUNT, CARCINOMA_COUNT, CARCINOMA_MAYBE_COUNT, \
CARCINOMA_IN_SITU_MAYBE_COUNT, CARCINOMA_IN_SITU_COUNT, CARCINOMA_POSSIBLE_COUNT, CARCINOMA_IN_SITU_POSSIBLE_COUNT
from precise_nlp.const.enums import Location
from precise_nlp.doc_parser import parse_file
from precise_nlp.extract.algorithm import get_adenoma_status, get_adenoma_histology, get_highgrade_dysplasia, \
get_adenoma_count, has_large_adenoma, get_adenoma_count_advanced, get_adenoma_distal, get_adenoma_proximal, \
get_adenoma_rectal, get_adenoma_unknown, get_villous_histology, get_dysplasia, get_sessile_serrated_adenoma, \
get_carcinomas, get_carcinomas_maybe, get_carcinomas_in_situ, get_carcinomas_in_situ_maybe, \
get_carcinomas_in_situ_possible, get_carcinomas_possible
from precise_nlp.extract.cspy.cspy import CspyManager, FindingVersion
from precise_nlp.extract.path.path_manager import PathManager
from precise_nlp.extract.maybe_counter import MaybeCounter
from loguru import logger
ITEMS = [
ADENOMA_STATUS,
TUBULAR,
TUBULOVILLOUS,
VILLOUS,
ANY_VILLOUS,
PROXIMAL_VILLOUS,
DISTAL_VILLOUS,
RECTAL_VILLOUS,
UNKNOWN_VILLOUS,
SIMPLE_HIGHGRADE_DYSPLASIA,
HIGHGRADE_DYSPLASIA,
ADENOMA_COUNT,
LARGE_ADENOMA,
ADENOMA_COUNT_ADV,
JAR_ADENOMA_COUNT_ADV,
ADENOMA_STATUS_ADV,
ADENOMA_DISTAL,
ADENOMA_DISTAL_COUNT,
JAR_ADENOMA_DISTAL_COUNT,
ADENOMA_PROXIMAL,
ADENOMA_PROXIMAL_COUNT,
JAR_ADENOMA_PROXIMAL_COUNT,
ADENOMA_RECTAL,
ADENOMA_RECTAL_COUNT,
JAR_ADENOMA_RECTAL_COUNT,
ADENOMA_UNKNOWN,
ADENOMA_UNKNOWN_COUNT,
JAR_ADENOMA_UNKNOWN_COUNT,
JAR_SESSILE_SERRATED_ADENOMA_COUNT,
INDICATION,
NUM_POLYPS,
BOWEL_PREP,
EXTENT,
]
def split_maybe_counters(data):
"""
Separate maybe counters into two additional variables
label__ge - 1 if ge, else 0
label__num - numeric portion of number
:param data:
:return:
"""
res = {}
for k, v in data.items():
if isinstance(v, MaybeCounter):
if v.greater_than:
res[f'{k}__ge'] = 1
res[f'{k}__num'] = v.count - 1
else:
res[f'{k}__ge'] = 1 if v.at_least else 0
res[f'{k}__num'] = v.count
return res
def process_text(path_text='', cspy_text='',
cspy_finding_version=FindingVersion.PRECISE, cspy_extent_search_all=False):
pm = PathManager(path_text)
cm = CspyManager(cspy_text, version=cspy_finding_version, cspy_extent_search_all=cspy_extent_search_all)
data = {}
if pm:
specs, specs_combined, specs_dict = PathManager.parse_jars(path_text)
tb, tbv, vl = get_adenoma_histology(pm)
# count
adenoma_cutoff, adenoma_status, adenoma_count = get_adenoma_count_advanced(pm)
_, _, jar_adenoma_count = get_adenoma_count_advanced(pm, jar_count=True)
# distal
aden_dist_cutoff, aden_dist_status, aden_dist_count = get_adenoma_distal(pm)
_, _, jar_ad_cnt_dist = get_adenoma_distal(pm, jar_count=True)
# proximal
aden_prox_cutoff, aden_prox_status, aden_prox_count = get_adenoma_proximal(pm)
_, _, jar_ad_cnt_prox = get_adenoma_proximal(pm, jar_count=True)
# rectal
aden_rect_cutoff, aden_rect_status, aden_rect_count = get_adenoma_rectal(pm)
_, _, jar_ad_cnt_rect = get_adenoma_rectal(pm, jar_count=True)
# unk
aden_unk_cutoff, aden_unk_status, aden_unk_count = get_adenoma_unknown(pm)
_, _, jar_ad_cnt_unk = get_adenoma_unknown(pm, jar_count=True)
data.update({
ADENOMA_STATUS: get_adenoma_status(specs),
TUBULAR: tb,
TUBULOVILLOUS: bool(tbv),
VILLOUS: bool(vl),
ANY_VILLOUS: get_villous_histology(pm),
PROXIMAL_VILLOUS: get_villous_histology(pm, Location.PROXIMAL),
DISTAL_VILLOUS: get_villous_histology(pm, Location.DISTAL),
RECTAL_VILLOUS: get_villous_histology(pm, Location.RECTAL),
UNKNOWN_VILLOUS: get_villous_histology(pm, Location.UNKNOWN),
SIMPLE_HIGHGRADE_DYSPLASIA: get_highgrade_dysplasia(specs),
HIGHGRADE_DYSPLASIA: get_dysplasia(pm),
ADENOMA_COUNT: get_adenoma_count(specs),
LARGE_ADENOMA: has_large_adenoma(pm, cm, version=cspy_finding_version),
ADENOMA_COUNT_ADV: adenoma_count,
JAR_ADENOMA_COUNT_ADV: jar_adenoma_count,
ADENOMA_STATUS_ADV: adenoma_status,
ADENOMA_DISTAL: aden_dist_status,
ADENOMA_DISTAL_COUNT: aden_dist_count,
JAR_ADENOMA_DISTAL_COUNT: jar_ad_cnt_dist,
ADENOMA_PROXIMAL: aden_prox_status,
ADENOMA_PROXIMAL_COUNT: aden_prox_count,
JAR_ADENOMA_PROXIMAL_COUNT: jar_ad_cnt_prox,
ADENOMA_RECTAL: aden_rect_status,
ADENOMA_RECTAL_COUNT: aden_rect_count,
JAR_ADENOMA_RECTAL_COUNT: jar_ad_cnt_rect,
ADENOMA_UNKNOWN: aden_unk_status,
ADENOMA_UNKNOWN_COUNT: aden_unk_count,
JAR_ADENOMA_UNKNOWN_COUNT: jar_ad_cnt_unk,
JAR_SESSILE_SERRATED_ADENOMA_COUNT: get_sessile_serrated_adenoma(pm, jar_count=True),
CARCINOMA_COUNT: get_carcinomas(pm, jar_count=True),
CARCINOMA_MAYBE_COUNT: get_carcinomas_maybe(pm, jar_count=True),
CARCINOMA_POSSIBLE_COUNT: get_carcinomas_possible(pm, jar_count=True),
CARCINOMA_IN_SITU_COUNT: get_carcinomas_in_situ(pm, jar_count=True),
CARCINOMA_IN_SITU_MAYBE_COUNT: get_carcinomas_in_situ_maybe(pm, jar_count=True),
CARCINOMA_IN_SITU_POSSIBLE_COUNT: get_carcinomas_in_situ_possible(pm, jar_count=True),
})
if cm:
data.update({
INDICATION: cm.indication,
NUM_POLYPS: cm.num_polyps,
BOWEL_PREP: cm.prep,
EXTENT: cm.extent,
})
# split maybe counters into two separate columns
data.update(split_maybe_counters(data))
return data
def get_file_or_empty_string(path, filename, encoding='utf8'):
fp = os.path.join(path, filename)
if not os.path.isfile(fp):
return ''
with open(fp, encoding=encoding) as fh:
return fh.read()
def get_data(filetype, path, identifier=None, path_text=None, cspy_text=None, encoding='utf8',
limit=None, count=None, truth=None, text=None, filenames=None, lookup_table=None,
requires_cspy_text=False):
"""
:param encoding:
:param count:
:param filenames:
:param lookup_table: csv file of identifier,cspyfile,pathfile
:param requires_cspy_text:
:param filetype:
:param path:
:param identifier:
:param path_text:
:param cspy_text:
:param limit:
:param truth:
:param text: deprecated
:return:
"""
if text:
warnings.warn('Use `path_text` rather than `text`.',
DeprecationWarning
)
path_text = text
if path and os.path.isdir(path):
if lookup_table:
with open(lookup_table) as fh:
for line in fh:
identifier, cspy_file, path_file = line.strip().split(',')
if limit and identifier not in limit:
continue
cspy_text = get_file_or_empty_string(path, cspy_file, encoding=encoding)
path_text = get_file_or_empty_string(path, path_file, encoding=encoding)
yield identifier, path_text, cspy_text, None
elif filenames:
for fn in filenames:
fp = os.path.join(path, fn)
if not os.path.exists(fp):
fp = f'{fp}.{filetype}'
yield from get_data(filetype, fp, identifier, path_text, cspy_text, truth)
else:
for i, fn in enumerate(os.listdir(path)):
if count and i >= count:
break
yield from get_data(filetype, os.path.join(path, fn), identifier, path_text,
cspy_text, encoding, count=count, truth=truth)
elif path and filetype == 'txt' and os.path.isfile(path):
with open(path, encoding=encoding) as fh:
yield os.path.basename(path), '', fh.read(), None
elif PANDAS:
if 'DataFrame' in str(type(filetype)):
df = filetype
elif filetype == 'csv':
df = pd.read_csv(path, encoding=encoding)
elif filetype == 'tab' or filetype == 'tsv':
df = pd.read_csv(path, sep='\t', encoding=encoding)
elif filetype == 'sas':
df = | pd.read_sas(path, encoding=encoding) | pandas.read_sas |
# Data Preprocessing
"""ML_Workflow template with required libraries and function calls.
@author:Varshtih
"""
import pandas as pd
import numpy as np
from autoimpute.imputations import MultipleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from scipy import stats
import sweetviz
import seaborn as sns
from pyod.models.feature_bagging import FeatureBagging
# Load Input Files
train_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\train.csv")
test_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\test.csv")
train_data.info()
test_data.info()
# Fill in required Inputs
x_train = train_data.iloc[:, list(range(3, 11))]
y_train = train_data.iloc[:, list(range(11,12))].values
x_train_num = train_data.iloc[:, list(range(3, 9))]
x_train_txt = train_data.iloc[:, list(range(9, 11))]
x_train_txt_encode_split = 2 # Split at Column Number
x_test = test_data.iloc[:, list(range(3, 11))]
x_test_num = test_data.iloc[:, list(range(3, 9))]
x_test_txt = test_data.iloc[:, list(range(9, 11))]
x_test_txt_encode_split = 2 # Split at Column Number
# Impute Missing values
# Numerical Imputer
imputer_num = MultipleImputer(strategy='stochastic', return_list=True, n=5, seed=101)
x_train_num_avg = imputer_num.fit_transform(x_train_num)
x_train_num_concat = x_train_num_avg[0][1]
for i in range(len(x_train_num_avg)-1):
x_train_num_concat = pd.concat([x_train_num_concat,x_train_num_avg[i+1][1]], axis=1)
x_train_num_avg = x_train_num_concat.groupby(by=x_train_num_concat.columns, axis=1).apply(lambda g: g.mean(axis=1))
x_test_num_avg = imputer_num.fit_transform(x_test_num)
x_test_num_concat = x_test_num_avg[0][1]
for i in range(len(x_test_num_avg)-1):
x_test_num_concat = pd.concat([x_test_num_concat,x_test_num_avg[i+1][1]], axis=1)
x_test_num_avg = x_test_num_concat.groupby(by=x_test_num_concat.columns, axis=1).apply(lambda g: g.mean(axis=1))
# Categorical Imputer
imputer_txt = MultipleImputer(strategy='categorical', return_list=True, n=10, seed=101)
x_train_txt_avg = imputer_txt.fit_transform(x_train_txt)
x_train_txt_col = list(x_train_txt.columns)
x_train_txt_col.sort()
x_train_txt_concat = x_train_txt_avg[0][1]
for i in range(len(x_train_txt_avg)-1):
x_train_txt_concat = pd.concat([x_train_txt_concat, x_train_txt_avg[i+1][1]], axis=1)
x_train_txt_avg = x_train_txt_concat.groupby(by=x_train_txt_concat.columns, axis=1).apply(lambda g: stats.mode(g, axis=1)[0])
x_train_txt_avg = x_train_txt_avg.sort_index(axis=0)
x_train_txt_avg_temp = pd.DataFrame(x_train_txt_avg[0])
for i in range(len(x_train_txt_avg)-1):
x_train_txt_avg_temp = pd.concat([x_train_txt_avg_temp,pd.DataFrame(x_train_txt_avg[i+1])], axis=1)
x_train_txt_avg_temp.columns = x_train_txt_col
x_train_txt_avg = x_train_txt_avg_temp
x_train_txt = x_train_txt.sort_index(axis=1)
x_test_txt_avg = imputer_txt.fit_transform(x_test_txt)
x_test_txt_col = list(x_test_txt.columns)
x_test_txt_col.sort()
x_test_txt_concat = x_test_txt_avg[0][1]
for i in range(len(x_test_txt_avg)-1):
x_test_txt_concat = pd.concat([x_test_txt_concat, x_test_txt_avg[i+1][1]], axis=1)
x_test_txt_avg = x_test_txt_concat.groupby(by=x_test_txt_concat.columns, axis=1).apply(lambda g: stats.mode(g, axis=1)[0])
x_test_txt_avg = x_test_txt_avg.sort_index(axis=0)
x_test_txt_avg_temp = pd.DataFrame(x_test_txt_avg[0])
for i in range(len(x_test_txt_avg)-1):
x_test_txt_avg_temp = pd.concat([x_test_txt_avg_temp,pd.DataFrame(x_test_txt_avg[i+1])], axis=1)
x_test_txt_avg_temp.columns = x_test_txt_col
x_test_txt_avg = x_test_txt_avg_temp
x_test_txt = x_test_txt.sort_index(axis=1)
# Merge Imputed Training Data and Convert to Values
x_train_Imp = pd.concat([x_train_num, x_train_txt], axis=1)
x_train_Imp = x_train_Imp.iloc[:, :].values
x_train_num_avg = x_train_num_avg.iloc[:, :].values
x_train_txt_label_encode = x_train_txt_avg.iloc[:, list(range(0,x_train_txt_encode_split))].values
x_train_txt_hot_encode = x_train_txt_avg.iloc[:, list(range(x_train_txt_encode_split, len(x_train_txt_avg.columns)))].values
x_train_txt_label_encode_col = list(x_train_txt_avg.iloc[:, list(range(0,x_train_txt_encode_split))].columns)
x_train_txt_hot_encode_col = list(x_train_txt_avg.iloc[:, list(range(x_train_txt_encode_split, len(x_train_txt_avg.columns)))].columns)
x_test_Imp = pd.concat([x_test_num, x_test_txt], axis=1)
x_test_Imp = x_test_Imp.iloc[:, :].values
x_test_num_avg = x_test_num_avg.iloc[:, :].values
x_test_txt_label_encode = x_test_txt_avg.iloc[:, list(range(0,x_test_txt_encode_split))].values
x_test_txt_hot_encode = x_test_txt_avg.iloc[:, list(range(x_test_txt_encode_split, len(x_test_txt_avg.columns)))].values
x_test_txt_label_encode_col = list(x_test_txt_avg.iloc[:, list(range(0,x_test_txt_encode_split))].columns)
x_test_txt_hot_encode_col = list(x_test_txt_avg.iloc[:, list(range(x_test_txt_encode_split, len(x_test_txt_avg.columns)))].columns)
# Label Encode Categorical Variables
# Update onelabel eligible features only
labelencoder_X = LabelEncoder()
for i in range(np.shape(x_train_txt_label_encode)[1]):
x_train_txt_label_encode[:, i-1] = labelencoder_X.fit_transform(x_train_txt_label_encode[:, i-1])
for i in range(np.shape(x_test_txt_label_encode)[1]):
x_test_txt_label_encode[:, i-1] = labelencoder_X.fit_transform(x_test_txt_label_encode[:, i-1])
# Hot Encode Categorical Variables
#x_train_txt_hot_encode = pd.get_dummies(data=x_train_txt_avg.iloc[:, list(range(x_train_txt_encode_split, len(x_train_txt_avg.columns)))], columns=x_train_txt_hot_encode_col)
#x_train_txt_hot_encoded_col = list(x_train_txt_hot_encode.columns)
#x_train_txt_hot_encode = x_train_txt_hot_encode.values
#x_test_txt_hot_encode = pd.get_dummies(data=x_test_txt_avg.iloc[:, list(range(x_test_txt_encode_split, len(x_test_txt_avg.columns)))], columns=x_test_txt_hot_encode_col)
#x_test_txt_hot_encoded_col = list(x_test_txt_hot_encode.columns)
#x_test_txt_hot_encode = x_test_txt_hot_encode.values
x_train_Imp_En = pd.concat([pd.DataFrame(x_train_num_avg), pd.DataFrame(x_train_txt_label_encode)], axis=1) # Update with Hot Encode Data if available
x_test_Imp_En = pd.concat([ | pd.DataFrame(x_test_num_avg) | pandas.DataFrame |
import numpy as np
from .. import predict, quality, checks, utils, prepare, reporting, assess
import os, random
from tifffile import imread, imsave
import matplotlib.pyplot as plt
from astropy.visualization import simple_norm
import wget
import shutil
from enum import Enum
import pandas as pd
import time
from mashumaro import DataClassDictMixin
from collections.abc import Mapping
from pathlib import Path
from dataclasses import dataclass
from typing import List
class params:
class Weights_choice(Enum):
BEST = "best"
LAST = "last"
class Pretrained_model_choice(Enum):
MODEL_NAME = "Model_name"
MODEL_FROM_FILE = "Model_from_file"
class Data_type(Enum):
SINGLE_IMAGES = "Single_Images"
STACKS = "Stacks"
# Defaults should be loaded in per submodule
# def get_defaults():
# # default_params():
# return {
# # "model":"N2V",
# "model_name": None,
# "model_path": None,
# "ref_str": None,
# "Notebook_version": 1.12,
# "initial_learning_rate": 0.0004,
# "number_of_steps": 100,
# "percentage_validation": 10,
# "image_patches": None,
# "loss_function": None,
# "batch_size": 128,
# "patch_size": 64,
# "Training_source": None,
# "number_of_epochs": 100,
# "Use_Default_Advanced_Parameters": False,
# "trained": False,
# "augmentation": False,
# # "pretrained_model": False,
# "Pretrained_model_choice": params.Pretrained_model_choice.MODEL_NAME,
# "Weights_choice": params.Weights_choice.BEST,
# # "QC_model_path": os.path.join(".dl4mic", "qc"),
# "QC_model_path": "",
# "QC_model_name": None,
# }
# if (Use_Default_Advanced_Parameters):
# print("Default advanced parameters enabled")
# # number_of_steps is defined in the following cell in this case
# batch_size = 128
# percentage_validation = 10
# initial_learning_rate = 0.0004
class DictLike(object):
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def __getitem__(self, arg):
# return getattr(self,arg) #Move away from bloody dict
return getattr(self, arg)
def __setitem__(self, key, value):
setattr(self, key, value)
# return
pass
@dataclass
class Folders(DataClassDictMixin, DictLike):
"""
Extends DataClassDictMixin and DictLike (probably better alternative
availiable) so that it can be initialised with a dict easy
"""
# model_name: str
base_out_folder: str = ".dl4mic"
output_folder: str = base_out_folder
QC_model_path: str = None
Training_source: str = None
Training_target: str = None
model_path: str = None
pretrained_model_path: str = None
Source_QC_folder: str = None
Target_QC_folder: str = None
Prediction_model_folder: str = None
Prediction_model_path: str = None
Data_folder: str = None
h5_file_path: str = None
Saving_path: str = None
def __post_init__(self):
defaults = {
"QC_model_path": "qc",
"Training_source": "training",
"Training_target": "target",
"model_path": "model",
"pretrained_model_path": "pretrained_model",
"Prediction_model_path": "prediction_model",
"Source_QC_folder": "qc_source",
"Target_QC_folder": "qc_target",
"Prediction_model_folder": "pred",
"Data_folder": "data",
"h5_file_path": "weights",
"Saving_path": "augment"
}
for key in defaults:
if self[key] is None:
self[key] = Path(os.path.join(self.output_folder, defaults[key]))
self[key].mkdir(parents=True, exist_ok=True)
# self.QC_model_path = os.path.join(output_folder, "qc")
# self.Training_source = os.path.join(output_folder, "training")
# self.Training_target= os.path.join(output_folder, "target")
# self.model_path = os.path.join(output_folder, "model")
# self.pretrained_model_path = os.path.join(output_folder, "pretrained_model")
# self.Source_QC_folder = os.path.join(output_folder, "qc_source")
# self.Target_QC_folder = os.path.join(output_folder, "qc_target")
# self.Prediction_model_folder = os.path.join(output_folder, "pred")
# self.Data_folder = os.path.join(output_folder, "data")
# self.h5_file_path = os.path.join(output_folder, "weights")
# # self.model_name = model_name
# self.output_folder = os.path.join(self.base_out_folder, self.model_name)
# self.QC_model_path = os.path.join(self.output_folder, "qc")
# self.Training_source = os.path.join(self.output_folder, "training")
# self.Training_target = os.path.join(self.output_folder, "target")
# self.model_path = os.path.join(self.output_folder, "model")
# self.pretrained_model_path = os.path.join(self.output_folder, "pretrained_model")
# self.Source_QC_folder = os.path.join(self.output_folder, "qc_source")
# self.Target_QC_folder = os.path.join(self.output_folder, "qc_target")
# self.Prediction_model_folder = os.path.join(self.output_folder, "pred")
# self.Data_folder = os.path.join(self.output_folder, "data")
# self.h5_file_path = os.path.join(self.output_folder, "weights")
@dataclass
class DL4MicModelParams(DataClassDictMixin, DictLike):
# folders: dataclass
# folders.base_out_folder: str = ".dl4mic"
# X_train: np.array = None
# X_test: np.array = None
# example_image: np.array = None
# TODO make all of these None type and then default in submodule
# May have solved this?
# folders: Folders = Folders()
model_name: str = "temp"
folders: Folders = Folders()
model: str = "dl4mic"
image_patches: int = 100
ref_str: str = "ref"
loss_function: str = "loss"
pretrained_model_choice: bool = False
Use_pretrained_model: bool = False
Use_the_current_trained_model: bool = False
Use_Data_augmentation: bool = False
Notebook_version: float = 1.12
initial_learning_rate: float = 0.0004
number_of_steps: int = 100
number_of_patches: int = 100
percentage_validation: int = 10
batch_size: int = 128
patch_size: int = 64
number_of_epochs: int = 100
Use_Default_Advanced_Parameters: bool = False
trained: bool = False
augmentation: bool = False
# pretrained_model: bool = False
Pretrained_model_choice: str = params.Pretrained_model_choice.MODEL_NAME
Weights_choice: str = params.Weights_choice.BEST
base_out_folder: str = ".dl4mic"
# QC_model_path: str = os.path.join(base_out_folder, "qc")
# Training_source: str = os.path.join(base_out_folder, "training")
# Training_target: str = os.path.join(base_out_folder, "target")
# model_path: str = base_out_folder
# pretrained_model_path: str = os.path.join(base_out_folder, "model")
pretrained_model_name: str = "model"
Source_QC_folder: str = None
Target_QC_folder: str = None
# Prediction_model_folder: str = os.path.join(base_out_folder, "pred")
Prediction_model_name: str = "pred"
# Prediction_model_path: str = Prediction_model_folder
QC_model_name: str = None
Data_type: str = ""
ref_aug: str = str(
'- Augmentor: Bloice, <NAME>., <NAME>,'
'and <NAME>. "Augmentor: an image augmentation '
'library for machine learning." arXiv '
'preprint arXiv:1708.04680 (2017).'
)
bestLearningRate: float = initial_learning_rate
lastLearningRate: float = initial_learning_rate
Multiply_dataset_by: int = 2
Save_augmented_images: bool = False
Use_Default_Augmentation_Parameters: bool = True
rotate_90_degrees: str = 0.5
rotate_270_degrees: str = 0.5
flip_left_right: str = 0.5
flip_top_bottom: str = 0.5
random_zoom: str = 0
random_zoom_magnification: str = 0.9
random_distortion: str = 0
image_shear: str = 0
max_image_shear: str = 10
skew_image: str = 0
skew_image_magnitude: str = 0
def __post_init__(self):
# pass
self.folders.output_folder = os.path.join(self.base_out_folder, self.model_name)
self.folders.QC_dir = Path(os.path.join(self.QC_model_path, self.QC_model_name))
self.folders.__post__init__()
# self.folders.output_folder = self.output_folder
# def __init__(self,*args,**kwargs):
# super().__init__()
# from_dict(self,kwargs)
# super().__init__(**model_config)
# h5_file_path: str = None
# output_folder: str = os.path.join(base_out_folder, model_name)
# folders : object = Folders(model_name)
# folder_list: list = [
# "base_out_folder",
# "QC_model_path",
# "Training_source",
# "Training_target",
# "model_path",
# "pretrained_model_path",
# "pretrained_model_name",
# "Source_QC_folder",
# "Target_QC_folder",
# "Prediction_model_folder",
# "Prediction_model_path",
# "Data_folder",
# "output_folder"
# ]
# def __init__(self,model_config={}):
# super().__init__(model_config)
# DL4MicModelParams = from_dict(data_class=B, data=data)
class DL4MicModel(DL4MicModelParams):
# @dataclass
class data(DictLike):
example_image: np.array = None
X_train: np.array = None
Y_train: np.array = None
X_test: np.array = None
Y_test: np.array = None
time_start: float = None
trained: bool = False
history: np.array = None
def __post_init__(self):
# super().__init__(**model_config)
self.init()
self.paths_and_dirs()
self.model_specifics()
# self.dl4mic_model_config.update(model_config)
self.interface()
def paths_and_dirs(self):
# self.output_folder = os.path.join(self.base_out_folder, self.model_name)
# Path(self.output_folder).mkdir(parents=True, exist_ok=True)
# folder_dict = {k: self.__dict__[k] for k in self.folder_list}
# folder_dict = self.folders.__dict__
self.append_config(utils.make_folders(self.folders.__dict__))
def init(self):
self.authors = ["You"]
pass
def step_3(self):
self.step_3_1()
self.step_3_2()
pass
def step_3_1(self):
self.checks()
pass
def step_3_2(self):
'''
Data augmentation
'''
self.augmentation()
pass
def step_3_3(self):
'''
Load pretrained model
'''
self.load_pretrained_model()
pass
def step_4(self):
'''
Train the network
'''
self.step_4_1()
self.step_4_2()
pass
def step_4_1(self):
'''
Prepare the training data and model for training
'''
self.prepare()
def step_4_2(self):
'''
Start Training
'''
self.train_model()
pass
def step_5(self):
'''
Evaluate your model
'''
self.step_5_1()
self.step_5_2()
pass
def step_5_1(self):
'''
Inspection of the loss function
'''
pass
def step_5_2(self):
'''
Error mapping and quality metrics estimation
'''
self.quality()
def step_6(self):
'''
Using the trained model
'''
self.step_6_1()
self.step_6_2()
def step_6_1(self):
'''
Generate prediction(s) from unseen dataset
'''
self.predict()
def step_6_2(self):
'''
Assess predicted output
'''
self.assess()
def model_specifics(self):
pass
def import_checks(self):
pass
# def __iter__(self):
# return iter(self.__dict__)
# def __len__(self):
# return len(self.__dict__)
# def __getitem__(self, arg):
# # return getattr(self,arg) #Move away from bloody dict
# return getattr(self, arg)
# def __setitem__(self, key, value):
# setattr(self, key, value)
# # return
def model_specifics(self):
pass
def interface(self):
pass
def set_model_config(self):
pass
def set_model_params(self):
pass
def check_model_params(self):
self.check_model_specific_params()
pass
def check_model_specific_params(self):
pass
def get_ref(self):
return self.ref_str
# def __repr__(self):
# self.dl4mic_model_config
def append_config(self, config_dict):
self.__dict__.update(config_dict)
# return self.dl4mic_model_config
def get_config(self):
return self.__dict__
def get_config_df(self):
return | pd.DataFrame(self.__dict__) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
from matplotlib import pyplot as plt
import numpy as np
# Configuration
anomaly_color = 'sandybrown'
prediction_color = 'yellowgreen'
training_color = 'yellowgreen'
validation_color = 'gold'
test_color = 'coral'
figsize=(12, 4)
def load_series(file_name, data_folder):
# Load the input data
data_path = f'{data_folder}/data/{file_name}'
data = pd.read_csv(data_path)
data['timestamp'] = pd.to_datetime(data['timestamp'])
data.set_index('timestamp', inplace=True)
# Load the labels
label_path = f'{data_folder}/labels/combined_labels.json'
with open(label_path) as fp:
labels = pd.Series(json.load(fp)[file_name])
labels = pd.to_datetime(labels)
# Load the windows
window_path = f'{data_folder}/labels/combined_windows.json'
window_cols = ['begin', 'end']
with open(window_path) as fp:
windows = pd.DataFrame(columns=window_cols,
data=json.load(fp)[file_name])
windows['begin'] = pd.to_datetime(windows['begin'])
windows['end'] = pd.to_datetime(windows['end'])
# Return data
return data, labels, windows
def plot_series(data, labels=None,
windows=None,
predictions=None,
highlights=None,
val_start=None,
test_start=None,
figsize=figsize):
# Open a new figure
plt.close('all')
plt.figure(figsize=figsize)
# Plot data
plt.plot(data.index, data.values, zorder=0, label='data')
# Rotated x ticks
plt.xticks(rotation=45)
# Plot labels
if labels is not None:
plt.scatter(labels.values, data.loc[labels],
color=anomaly_color, zorder=2,
label='labels')
# Plot windows
if windows is not None:
for _, wdw in windows.iterrows():
plt.axvspan(wdw['begin'], wdw['end'],
color=anomaly_color, alpha=0.3, zorder=1)
# Plot training data
if val_start is not None:
plt.axvspan(data.index[0], val_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is None and test_start is not None:
plt.axvspan(data.index[0], test_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is not None:
plt.axvspan(val_start, test_start,
color=validation_color, alpha=0.1, zorder=-1)
if test_start is not None:
plt.axvspan(test_start, data.index[-1],
color=test_color, alpha=0.3, zorder=0)
# Predictions
if predictions is not None:
plt.scatter(predictions.values, data.loc[predictions],
color=prediction_color, alpha=.4, zorder=3,
label='predictions')
plt.legend()
plt.tight_layout()
def plot_autocorrelation(data, max_lag=100, figsize=figsize):
# Open a new figure
plt.close('all')
plt.figure(figsize=figsize)
# Autocorrelation plot
pd.plotting.autocorrelation_plot(data['value'])
# Customized x limits
plt.xlim(0, max_lag)
# Rotated x ticks
plt.xticks(rotation=45)
plt.tight_layout()
def plot_histogram(data, bins=10, vmin=None, vmax=None, figsize=figsize):
# Build a new figure
plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist(data, density=True, bins=bins)
# Update limits
lims = plt.xlim()
if vmin is not None:
lims = (vmin, lims[1])
if vmax is not None:
lims = (lims[0], vmax)
plt.xlim(lims)
plt.tight_layout()
def plot_histogram2d(xdata, ydata, bins=10, figsize=figsize):
# Build a new figure
plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist2d(xdata, ydata, density=True, bins=bins)
plt.tight_layout()
def plot_density_estimator_1D(estimator, xr, figsize=figsize):
# Build a new figure
plt.close('all')
plt.figure(figsize=figsize)
# Plot the estimated density
xvals = xr.reshape((-1, 1))
dvals = np.exp(estimator.score_samples(xvals))
plt.plot(xvals, dvals)
plt.tight_layout()
def plot_density_estimator_2D(estimator, xr, yr, figsize=figsize):
# Plot the estimated density
nx = len(xr)
ny = len(yr)
xc = np.repeat(xr, ny)
yc = np.tile(yr, nx)
data = np.vstack((xc, yc)).T
dvals = np.exp(estimator.score_samples(data))
dvals = dvals.reshape((nx, ny))
# Build a new figure
plt.close('all')
plt.figure(figsize=figsize)
plt.pcolor(dvals)
plt.tight_layout()
# plt.xticks(np.arange(0, len(xr)), xr)
# plt.yticks(np.arange(0, len(xr)), yr)
def get_pred(signal, thr):
return | pd.Series(signal.index[signal >= thr]) | pandas.Series |
import pandas as pd
def read_jhu(date):
"""Provide date in MM-DD-YYYY format"""
url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{date}.csv'
data = pd.read_csv(url,usecols=['Province_State', 'Last_Update', 'Incident_Rate', 'Mortality_Rate', 'Testing_Rate', 'Hospitalization_Rate'], parse_dates=['Last_Update'])
data = data.loc[~(d.Province_State == 'Diamond Princess')]
data.Last_Update = data.Last_Update.dt.date
return data
def combine_jhu(range_start, range_end):
import requests
start_url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{range_start}.csv'
end_url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{range_end}.csv'
start_response = requests.get(url=start_url).status_code
end_response = requests.get(url=end_url).status_code
if start_response != 200:
print(f'Start Date Response {start_response}')
if end_response != 200:
print(f'End Date Response {end_response}')
dates_list = [date.strftime('%m-%d-%Y') for date in pd.date_range(start=range_start, end=range_end)]
frames = [read_jhu(date) for date in dates_list]
result = | pd.concat(frames) | pandas.concat |
"""
Collection of utilities used for model learning and evaluation.
============================================================================================================================
This code is provided under the MIT License and is meant to be helpful, but WITHOUT ANY WARRANTY;
November 2020 by <NAME> and <NAME>; University of Toronto + Vector Institute
============================================================================================================================
Notes:
"""
import logging
import numpy as np
from hashlib import sha1
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from models.common import mask_from_lengths
from torch.utils.data import DataLoader
from models.NeuralCDE.metamodel import NeuralCDE
import controldiffeq
logger = logging.getLogger(__name__)
def human_evaluation(env, agent, human_trajectories, use_soc_state=True):
rewards = []
for ep, trajectory in enumerate(human_trajectories):
env.reset()
agent.reset()
for action in trajectory:
env.act(action)
terminal = False
agent_reward = 0 # NOT including reward accumulated along human trajectory
s = env.get_soc_state() if use_soc_state else env.get_pixel_state()
while not terminal:
action = agent.get_action(s, evaluate=True)
pixel_state, r, terminal, soc_state = env.act(action)
s = soc_state if use_soc_state else pixel_state
agent_reward += r
rewards.append(agent_reward)
return rewards
def plot(data={}, loc="visualization.pdf", x_label="", y_label="", title="", kind='line',
legend=True, index_col=None, clip=None, moving_average=False):
# pass
if all([len(data[key]) > 1 for key in data]):
if moving_average:
smoothed_data = {}
for key in data:
smooth_scores = [np.mean(data[key][max(0, i - 10):i + 1]) for i in range(len(data[key]))]
smoothed_data['smoothed_' + key] = smooth_scores
smoothed_data[key] = data[key]
data = smoothed_data
df = pd.DataFrame(data=data)
ax = df.plot(kind=kind, legend=legend, ylim=clip)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_title(title)
plt.tight_layout()
plt.savefig(loc)
plt.close()
def write_to_csv(data={}, loc="data.csv"):
if all([len(data[key]) > 1 for key in data]):
df = | pd.DataFrame(data=data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Quant Analyst Intern Assessment
#
# ## Section #2: Multivariate Time Series Analysis
#
# ### In this section, you will build a Multivariate Time Series Model. Please use Jupyter Notebook for this exercise. You may answer all questions in MARKDOWN inside the Jupyter Notebook.
#
# ### a. Propose up to five factors that may affect the USD/CAD trading range and explain your reasons for making these assumptions. Also explain the type of data frequency you will use (for example, intraday 5 minutes/daily/weekly).
#
# *I choose the following four factors:
# WTI crude oil price, xauusd, U.S. retail sales year-on-year, U.S. crude oil exports to Canada.*
#
# *I think there are not many factors that affect USD/CAD. It is mainly considered from the oil price data and U.S. economic data.*
#
# *First of all, the Canadian dollar is a commodity currency. Canada’s economic orientation is also dominated by exports. It is one of the important oil exporting countries. Therefore, rising oil prices may put a lot of pressure on the Canadian dollar. At this time, its purchasing power is limited. It will be greatly reduced, and it will depreciate against major currencies. It should be noted that although Canada exports a lot of oil, it also imports a lot of oil from the United States. A considerable part of this is that Canada first exported crude oil to the United States, then import it back from the United States. So I chose the WTI crude oil price and the amount of crude oil exported from the U.S. to Canada.*
#
# *Secondly, US consumption data, QE, Taper, and fiscal policies will all have an impact on usd/cad. Gold is closely related to the US dollar, so I chose xauusd. Since consumption accounts for a large proportion of the US economy, so from the perspective of consumption data I chose the retail sales year-on-year, and similarly, I can also choose the consumer confidence index instead. Here is a detail, the poor employment data of the United States will drive the Canadian dollar to rise, because the United States is full of employment, and the production of shale oil also increases, the oil prices fall.*
#
# *The specific choice actually depends on the strategy in different periods. For example, the current international oil prices are rising, but Canadian export oil is selling well, causing usdcad to fall a lot. If the U.S. dollar taper, international oil prices are weak, OPEC increases production capacity, and the U.S. dollar appreciates, the Canadian dollar will weaken relatively, and the logic will become dependent on US consumption data and manufacturing data.*
#
# *In the actual trading strategy, the higher the data frequency, the better. The FX leverage ratio is very high, so it is trading back and forth in the min range, and no one can bear the fluctuation of the next day.*
#
# *And I think the best construction of this model should be based on the long-term trend of the U.S. dollar index and the long-term trend of international oil prices, to build a large-scale model of usdcad, and build it with mathematical data from a macro perspective. There is a dual relationship between the surface and the inside called the trend structure in technical theory, that is, any trend in a large level must fall in a certain direction. The long-term trend is the general direction, and the time series modeling can be better in the changing market to capture the high and low points of our trading, so it will be more useful in higher frequency transactions.*
#
# *However, due to the limited frequency and range of data I can obtain, and in order to simplify the model, the data selected below are all daily frequencies.*
# ### b. Causality & Statistical Signifiance
#
# i. Utilize the Granger’s Causality Test to test foe causation in the variables that you have selected. Construct a matrix to show the p-values of the variables against one another (i.e., you also need to show that there are possible relationships between one another; not just USD/CAD).
#
# ii. [Test for Statistical Significance Between Time Series] Use the Johassen’s test of cointegration to check statistical significance in your variables.
#
# ### c. [Check for Stationarity] Implement an ADF test to check for stationarity. If it is not stationary, conduct a first-order differencing and re-check for stationarity. Show the ADF p-values for all the selected variables. If first-order differencing is not stationary, conduct a second-order differencing and re-run the ADF test.
#
# ### d. [Model Selection] Using fit comparison estimates of AIC, BIC, FPE, and HQIC. Using the Fit comparison estimates to derive the most optimal number of lags for the model.
#
# ### Data Preprocessing
#
# In[1]:
# Data Preprocessing
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
def Interpolation(data_process):
"""
Interpolation
Perform arithmetic interpolation on the missing values of a column of data
"""
j = 0
df_seq = []
df = data_process.values
# arithmetic interpolation, the value of 0 is also considered here.
for i in range(len(df)):
if df[i] != 0:
diff = (df[i]-df[j])/(i-j)
data_arange = np.arange(df[j], df[i], diff)
j = i
for num in range(len(data_arange)):
df_seq.append(data_arange[num])
#if i == len(df)-1 and df[i] != 0:
# df_seq.append(df[i])
if i == len(df)-1 and df[i] == 0:
end = df[j] + diff * (i-j)
data_arange = np.arange(df[j], end, diff)
for num in range(len(data_arange)):
df_seq.append(data_arange[num])
if len(df_seq) != len(df):
df_seq.append(end)
return df_seq
data = pd.read_csv('./HistoryData.csv') # Read data.
data['Date'] = pd.to_datetime(data['Date'])
data.index = data['Date'] # Convert date format
# data.info()
data['WTI'] = data['WTI'].bfill() # There are few missing values in wti, so only mean interpolation is used.
data = data.fillna(0)
# These two are monthly data, and its trend needs to be retained, so use arithmetic interpolation
data['Sales_Rate'] = Interpolation(data['Sales_Rate'])
data['CrudeOilExports_UStoCanada'] = Interpolation(data['CrudeOilExports_UStoCanada'])
data = data.drop(data[(data['Sales_Rate']==0) |(data['CrudeOilExports_UStoCanada']==0)].index)
data.to_csv('./train_data.csv', index = False, header = True)
data.describe()
# In[4]:
# Data Visualization
DailyWTI = plt.figure(figsize = (10, 4))
ax = DailyWTI.add_subplot(111)
ax.set(title = 'WTI_DailyData',
ylabel = 'Price', xlabel='Date')
plt.plot(data['Date'], data['WTI'])
plt.show()
DailyXAU_USD = plt.figure(figsize = (10, 4))
ax = DailyXAU_USD.add_subplot(111)
ax.set(title = 'XAU_USD_DailyData',
ylabel = 'Price', xlabel='Date')
plt.plot(data['Date'], data['XAU_USD'])
plt.show()
DailySales_Rate = plt.figure(figsize = (10, 4))
ax = DailySales_Rate.add_subplot(111)
ax.set(title = 'Sales_Rate_DailyData',
ylabel = 'Price', xlabel='Date')
plt.plot(data['Date'], data['Sales_Rate'])
plt.show()
DailyCrudeOilExports_UStoCanada = plt.figure(figsize = (10, 4))
ax = DailyCrudeOilExports_UStoCanada.add_subplot(111)
ax.set(title = 'CrudeOilExports_UStoCanada_DailyData',
ylabel = 'Price', xlabel='Date')
plt.plot(data['Date'], data['CrudeOilExports_UStoCanada'])
plt.show()
# ### Granger Causality Tests
#
# ##### if p is less than 0.05, Granger causality is considered
# In[5]:
def Grangercausalitytests(data, maxlag_num):
"""
Granger Causality Tests
if p is less than 0.05, Granger causality is considered
"""
from statsmodels.tsa.stattools import grangercausalitytests
grangercausalitytests(data, maxlag=maxlag_num)
# All variables need to be tested
print('Grangercausalitytests Result between WTI and USD_CAD')
Grangercausalitytests(data[['WTI', 'USD_CAD']], 3)
print('\nGrangercausalitytests Result between XAU_USD and USD_CAD')
Grangercausalitytests(data[['XAU_USD', 'USD_CAD']], 3)
print('\nGrangercausalitytests Result between Sales_Rate and USD_CAD')
Grangercausalitytests(data[['Sales_Rate', 'USD_CAD']], 3)
print('\nGrangercausalitytests Result between CrudeOilExports_UStoCanada and USD_CAD')
Grangercausalitytests(data[['CrudeOilExports_UStoCanada', 'USD_CAD']], 3)
print('\nGrangercausalitytests Result between WTI and CrudeOilExports_UStoCanada')
Grangercausalitytests(data[['WTI', 'CrudeOilExports_UStoCanada']], 3)
print('\nGrangercausalitytests Result between Sales_Rate and CrudeOilExports_UStoCanada')
Grangercausalitytests(data[['Sales_Rate', 'CrudeOilExports_UStoCanada']], 3)
print('\nGrangercausalitytests Result between XAU_USD and CrudeOilExports_UStoCanada')
Grangercausalitytests(data[['XAU_USD', 'CrudeOilExports_UStoCanada']], 3)
print('\nGrangercausalitytests Result between WTI and Sales_Rate')
Grangercausalitytests(data[['WTI', 'Sales_Rate']], 3)
print('\nGrangercausalitytests Result between XAU_USD and Sales_Rate')
Grangercausalitytests(data[['XAU_USD', 'Sales_Rate']], 3)
print('\nGrangercausalitytests Result between WTI and XAU_USD')
Grangercausalitytests(data[['WTI', 'XAU_USD']], 3)
# ### ADF: Augmented Dickey-Fuller Unit Root Tests
#
# ##### Check if it is a stationary series
# In[6]:
def ADF_diff(timeseries, name):
"""
ADF: Augmented Dickey-Fuller unit root test.
Regression: Constant and trend order to include {“c”,”ct”,”ctt”,”nc”}
1. “c” : constant only (default).
2. “ct” : constant and trend.
3. “ctt” : constant, and linear and quadratic trend.
4. “nc” : no constant, no trend.
"""
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller as ADF
# Sequence after generating the differencing.
timeseries_diff1 = timeseries.diff(1)
timeseries_diff2 = timeseries_diff1.diff(1)
timeseries_diff1 = timeseries_diff1.fillna(0)
timeseries_diff2 = timeseries_diff2.fillna(0)
# ADF unit root test -- ct
print('Result of ADF--ct Test ')
timeseries_adf = ADF(timeseries[name].tolist(), regression='ct')
timeseries_diff1_adf = ADF(timeseries_diff1[name].tolist(), regression='ct')
timeseries_diff2_adf = ADF(timeseries_diff2[name].tolist(), regression='ct')
print('timeseries_adf : ', timeseries_adf)
print('timeseries_diff1_adf : ', timeseries_diff1_adf)
print('timeseries_diff2_adf : ', timeseries_diff2_adf)
plt.figure(figsize=(12, 8))
plt.plot(timeseries, label='Original', color='blue')
plt.plot(timeseries_diff1, label='Diff1', color='red')
plt.plot(timeseries_diff2, label='Diff2', color='purple')
plt.legend(loc='best')
plt.show()
# ADF unit root test -- c
print('Result of ADF--c Test ')
timeseries_adf = ADF(timeseries[name].tolist(), regression='c')
timeseries_diff1_adf = ADF(timeseries_diff1[name].tolist(), regression='c')
timeseries_diff2_adf = ADF(timeseries_diff2[name].tolist(), regression='c')
print('timeseries_adf : ', timeseries_adf)
print('timeseries_diff1_adf : ', timeseries_diff1_adf)
print('timeseries_diff2_adf : ', timeseries_diff2_adf)
plt.figure(figsize=(12, 8))
plt.plot(timeseries, label='Original', color='blue')
plt.plot(timeseries_diff1, label='Diff1', color='red')
plt.plot(timeseries_diff2, label='Diff2', color='purple')
plt.legend(loc='best')
plt.show()
# ADF unit root test -- nc
print('Result of ADF--nc Test ')
timeseries_adf = ADF(timeseries[name].tolist(), regression='nc')
timeseries_diff1_adf = ADF(timeseries_diff1[name].tolist(), regression='nc')
timeseries_diff2_adf = ADF(timeseries_diff2[name].tolist(), regression='nc')
print('timeseries_adf : ', timeseries_adf)
print('timeseries_diff1_adf : ', timeseries_diff1_adf)
print('timeseries_diff2_adf : ', timeseries_diff2_adf)
plt.figure(figsize=(12, 8))
plt.plot(timeseries, label='Original', color='blue')
plt.plot(timeseries_diff1, label='Diff1', color='red')
plt.plot(timeseries_diff2, label='Diff2', color='purple')
plt.legend(loc='best')
plt.show()
# Parse data with date for training
dateparse = lambda dates: datetime.strptime(dates, '%Y-%m-%d')
train_data = pd.read_csv('./train_data.csv', parse_dates=['Date'],
index_col='Date', date_parser=dateparse)
# Extract the data of each variable for future use
data_WTI = pd.DataFrame(train_data['WTI'])
data_XAU_USD = pd.DataFrame(train_data['XAU_USD'])
data_Sales_Rate = pd.DataFrame(train_data['Sales_Rate'])
data_CrudeOilExports_UStoCanada = pd.DataFrame(train_data['CrudeOilExports_UStoCanada'])
data_USD_CAD = pd.DataFrame(train_data['USD_CAD'])
# ADF Tests
print("Resulty of ADF -- WTI")
ADF_diff(data_WTI, 'WTI')
print("Resulty of ADF -- XAU_USD")
ADF_diff(data_XAU_USD, 'XAU_USD')
print("Resulty of ADF -- Sales_Rate")
ADF_diff(data_Sales_Rate, 'Sales_Rate')
print("Resulty of ADF -- CrudeOilExports_UStoCanada")
ADF_diff(data_CrudeOilExports_UStoCanada, 'CrudeOilExports_UStoCanada')
print("Resulty of ADF -- USD_CAD")
ADF_diff(data_USD_CAD, 'USD_CAD')
# ### Fit Comparison Estimates of AIC, BIC, FPE, and HQIC.
#
# ##### Using fit comparison estimates of AIC, BIC, FPE, and HQIC.
# ##### Using the Fit comparison estimates to derive the most optimal number of lags for the model.
# In[7]:
def order_selection(timeseries):
"""
Select the optimal lag order.
Use Vector Auto Regression (VAR) Model to select order.
The var model can pass a maximum number of lags and the order criterion to use for lag order selection.
"""
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
# Vector Auto Regression (VAR) Model
var_model = VAR(timeseries)
#Lag order selection
order = var_model.select_order(10)
print(order.summary())
#var_results = var_model.fit(maxlags=5, ic='aic')
#var_results.summary()
# Use original data to select order
order_selection(train_data)
# data after differencing
train_data_diff = train_data
data_WTI_diff1 = data_WTI.diff(1)
data_WTI_diff1 = data_WTI_diff1.fillna(0)
train_data_diff['WTI'] = data_WTI_diff1
data_XAU_USD_diff1 = data_XAU_USD.diff(1)
data_XAU_USD_diff1 = data_XAU_USD_diff1.fillna(0)
train_data_diff['XAU_USD'] = data_XAU_USD_diff1
data_Sales_Rate_diff1 = data_Sales_Rate.diff(1)
data_Sales_Rate_diff2 = data_Sales_Rate_diff1.diff(1)
data_Sales_Rate_diff2 = data_Sales_Rate_diff2.fillna(0)
train_data_diff['Sales_Rate'] = data_Sales_Rate_diff2
data_CrudeOilExports_UStoCanada_diff1 = data_CrudeOilExports_UStoCanada.diff(1)
data_CrudeOilExports_UStoCanada_diff2 = data_CrudeOilExports_UStoCanada_diff1.diff(1)
data_CrudeOilExports_UStoCanada_diff2 = data_CrudeOilExports_UStoCanada_diff2.fillna(0)
train_data_diff['CrudeOilExports_UStoCanada'] = data_CrudeOilExports_UStoCanada_diff2
data_USD_CAD_diff1 = data_USD_CAD.diff(1)
data_USD_CAD_diff1 = data_USD_CAD_diff1.fillna(0)
train_data_diff['USD_CAD'] = data_USD_CAD_diff1
# Use data after differencing to select order
order_selection(train_data_diff)
# ### Johassen’s test of cointegration
# In[8]:
def coint_johansen(timeseries):
"""
Johassen’s test of cointegration
Use the Johassen’s test of cointegration to check statistical significance in your variables.
"""
from statsmodels.tsa.vector_ar import vecm
# Johansen cointegration test, situation setting = continuous features
jres1 = vecm.coint_johansen(timeseries, det_order=0, k_ar_diff=1)
# View johansen cointegration test results
johansen_result = vecm.select_coint_rank(timeseries,det_order=0, k_ar_diff=1, signif=0.1)
print(johansen_result.summary())
# Choice of rank
print(johansen_result.rank)
j_name=np.array([['Order'], ['trace statistics'], ['CV 90%'], ['CV 95%'], ['CV 99%'] ] )
print(j_name)
j_order = jres1.ind
print(j_order) # Order of eigenvalues
j_lr1 = jres1.lr1
print(j_lr1) # trace statistics
j_cvt = jres1.cvt
print(j_cvt) # Critical values (90%, 95%, 99%) of trace statistic
# j_result = np.vstack(( j_order, j_lr1, j_cvt))
# print(j_result)
| pd.DataFrame(jres1.r0t) | pandas.DataFrame |
# --------------
import pandas as pd
from collections import Counter
# Load dataset
data = | pd.read_csv(path) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from multiprocessing import Pool
from matplotlib.dates import num2date, date2num
import datetime as dt
import sys
sys.path.append("./")
sys.path.append("create_plots/")
import utils
def to_probability(row, o=pd.DataFrame(), region_map=[]):
e = np.rint(row["elv"])
slist = row["slist"]
rng = row["range"]
if e > 4: X = o[(o.elv == e) & (o.slist >= slist-1) & (o.slist <= slist+1)]
else: X = o[(o.slist >= slist-1) & (o.slist <= slist+1)]
if len(X) == 0: row["region"], row["hop"] = "", -1
else:
#r, h = max(X.region.tolist(),key=X.region.tolist().count),\
#max(X.hop.tolist(),key=X.hop.tolist().count)
print(X.hop.tolist(), X.region.tolist())
row["region"], row["hop"] = "", -1
return row
def fetch_regions():
regions = {
"D":{"heights":80, "hops":np.array([0.5]), "angles":np.arange(5,45,1)},
"E":{"heights":110, "hops":np.array([0.5, 1, 1.5]), "angles":np.arange(5,45,1)},
"F":{"heights":250, "hops":np.array([0.5, 1, 1.5, 2., 2.5, 3.]), "angles":np.arange(5,45,1)}
}
region_map = []
for k in regions.keys():
re = regions[k]
for mh in re["hops"]:
region_map.append(str(mh) + k)
if not os.path.exists("hop_info.csv"):
fregions, fhops, fangles, frange, fheight = [], [], [], [], []
for k in regions.keys():
re = regions[k]
for a in re["angles"]:
for mh in re["hops"]:
fregions.append(k)
fhops.append(mh)
fangles.append(a)
rng = mh*re["heights"]/np.sin(np.deg2rad(a))
frange.append(rng)
o = pd.DataFrame()
o["region"], o["hop"], o["elv"], o["range"] = fregions, fhops, fangles, frange
o["slist"] = o.range.apply(lambda x: int((x-180)/45))
o.to_csv("hop_info.csv", index=False, header=True)
else: o = pd.read_csv("hop_info.csv")
return o, region_map, regions
def estimate_truths(u, Re=6378.1):
u["range"] = u.slist.apply(lambda x: 180. + 45.*x)
hop = np.zeros((len(u)))
kx = 0
for i, x in u.iterrows():
hop[kx] = -1
d, e = x["range"], x["elv"]
h = np.sqrt( d**2 + Re**2 + (2*d*Re*np.sin(np.deg2rad(e))) ) - Re
if (h >= 75 and h < 115) or (h >= 115 and h < 150) or (h >= 150 and h < 900): hop[kx] = 0.5
d = x["range"]/2
h = np.sqrt( d**2 + Re**2 + (2*d*Re*np.sin(np.deg2rad(e))) ) - Re
if (h >= 75 and h < 115) or (h >= 115 and h < 150) or (h >= 150 and h < 900): hop[kx] = 1.0
kx += 1
#u = u.apply(to_probability, args=(o, region_map,), axis=1)
#clust = np.array(u.labels)
#region, hop = np.array([""]*len(clust)), np.array([-1]*len(clust)).astype(float)
#for c in np.unique(clust):
# ux = u[u.labels==c]
# idx = clust==c
# e = np.rint(np.median(ux.elv))
# slist = np.rint(np.median(ux.slist))
# print(e, slist)
# if slist < 7: region[idx], hop[idx] = "D", 0.5
# if slist > 7 and slist < 25: region[idx], hop[idx] = "", 0.5
# if slist >= 25 and slist < 50: region[idx], hop[idx] = "", 1.0
u["hop"] = hop
return u
def get_kappa(y1, y2):
from sklearn.metrics import cohen_kappa_score
k = 0.2 + cohen_kappa_score(y1, y2)
return k
def get_fetch_sd_data(rad, dates, procs=16):
from get_sd_data import FetchData
fd = FetchData(rad, dates)
b, _ = fd.fetch_data()
u = fd.convert_to_pandas(b)
#u = u[u.bmnum==d.bmnum.tolist()[0]]
#d["dn"] = d.time.apply(lambda a: num2date(a))
#d = d[(d.time>=date2num(dates[0])) & (d.time<=date2num(dates[1]))]
#u["labels"] = d.labels
df_split = np.array_split(u, procs)
pool = Pool(procs)
print(" Going into procs...")
u = pd.concat(pool.map(estimate_truths, df_split))
pool.close()
pool.join()
return u
def generate_stats():
fname = "../outputs/cluster_tags_def_params/{rad}.{a_name}.gmm.{dn}.csv"
pubfile = utils.get_pubfile()
conn = utils.get_session(key_filename=pubfile)
m = | pd.read_csv("kappa_stat.csv", parse_dates=["date"]) | pandas.read_csv |
import pandas as pd
from ..common import _getJson, _strToList, _raiseIfNotStr, _strOrDate, _reindex, _toDatetime
def tops(symbols=None, token='', version=''):
'''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
symbols = _strToList(symbols)
if symbols:
return _getJson('tops?symbols=' + ','.join(symbols) + '%2b', token, version)
return _getJson('tops', token, version)
def topsDF(symbols=None, token='', version=''):
'''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(tops(symbols, token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df
def last(symbols=None, token='', version=''):
'''Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
symbols = _strToList(symbols)
if symbols:
return _getJson('tops/last?symbols=' + ','.join(symbols) + '%2b', token, version)
return _getJson('tops/last', token, version)
def lastDF(symbols=None, token='', version=''):
'''Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(last(symbols, token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df
def deep(symbol=None, token='', version=''):
'''DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,
and do not indicate the size or number of individual orders at any price level.
Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.
DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.
https://iexcloud.io/docs/api/#deep
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep?symbols=' + symbol, token, version)
return _getJson('deep', token, version)
def deepDF(symbol=None, token='', version=''):
'''DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,
and do not indicate the size or number of individual orders at any price level.
Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.
DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.
https://iexcloud.io/docs/api/#deep
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(deep(symbol, token, version))
_toDatetime(df)
return df
def auction(symbol=None, token='', version=''):
'''DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,
and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.
https://iexcloud.io/docs/api/#deep-auction
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/auction?symbols=' + symbol, token, version)
return _getJson('deep/auction', token, version)
def auctionDF(symbol=None, token='', version=''):
'''DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,
and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.
https://iexcloud.io/docs/api/#deep-auction
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(auction(symbol, token, version))
_toDatetime(df)
return df
def book(symbol=None, token='', version=''):
'''Book shows IEX’s bids and asks for given symbols.
https://iexcloud.io/docs/api/#deep-book
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/book?symbols=' + symbol, token, version)
return _getJson('deep/book', token, version)
def bookDF(symbol=None, token='', version=''):
'''Book shows IEX’s bids and asks for given symbols.
https://iexcloud.io/docs/api/#deep-book
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = book(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = pd.io.json.json_normalize(data)
_toDatetime(df)
return df
def opHaltStatus(symbol=None, token='', version=''):
'''The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.
IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.
In the spin, IEX will send out an Operational Halt Message with “N” (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.
After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.
https://iexcloud.io/docs/api/#deep-operational-halt-status
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/op-halt-status?symbols=' + symbol, token, version)
return _getJson('deep/op-halt-status', token, version)
def opHaltStatusDF(symbol=None, token='', version=''):
'''The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.
IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.
In the spin, IEX will send out an Operational Halt Message with “N” (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.
After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.
https://iexcloud.io/docs/api/#deep-operational-halt-status
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = opHaltStatus(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = pd.DataFrame(data)
_toDatetime(df)
return df
def officialPrice(symbol=None, token='', version=''):
'''The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.
These messages will be provided only for IEX Listed Securities.
https://iexcloud.io/docs/api/#deep-official-price
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/official-price?symbols=' + symbol, token, version)
return _getJson('deep/official-price', token, version)
def officialPriceDF(symbol=None, token='', version=''):
'''The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.
These messages will be provided only for IEX Listed Securities.
https://iexcloud.io/docs/api/#deep-official-price
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(officialPrice(symbol, token, version))
_toDatetime(df)
return df
def securityEvent(symbol=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/security-event?symbols=' + symbol, token, version)
return _getJson('deep/security-event', token, version)
def securityEventDF(symbol=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = securityEvent(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 12:52:08 2020
@author: kakdemi
"""
import pandas as pd
sim_2015 = pd.read_csv('shadow_price_2015.csv', usecols= ['Constraint', 'Time', 'Value'])
sim_2016 = pd.read_csv('shadow_price_2016.csv', usecols= ['Constraint', 'Time', 'Value'])
sim_2017 = pd.read_csv('shadow_price_2017.csv', usecols= ['Constraint', 'Time', 'Value'])
bal_list = ['Bal1Constraint', 'Bal2Constraint', 'Bal3Constraint', 'Bal4Constraint', 'Bal5Constraint', 'Bal6Constraint', 'Bal7Constraint', 'Bal8Constraint']
for b in bal_list:
name_1 = b + '_2015'
globals()[name_1] = sim_2015.loc[sim_2015['Constraint'] == b]
globals()[name_1].reset_index(inplace=True)
del globals()[name_1]['Time']
name_2 = b + '_2016'
globals()[name_2] = sim_2016.loc[sim_2016['Constraint'] == b]
globals()[name_2].reset_index(inplace=True)
del globals()[name_2]['Time']
name_3 = b + '_2017'
globals()[name_3] = sim_2017.loc[sim_2017['Constraint'] == b]
globals()[name_3].reset_index(inplace=True)
del globals()[name_3]['Time']
name_t = b + '_total'
globals()[name_t] = pd.concat([globals()[name_1], globals()[name_2], globals()[name_3]])
globals()[name_t].reset_index(inplace=True, drop=True)
del globals()[name_t]['index']
Total_sim = pd.concat([Bal1Constraint_total['Value'], Bal2Constraint_total['Value'], Bal3Constraint_total['Value'], Bal4Constraint_total['Value'], Bal5Constraint_total['Value'], Bal6Constraint_total['Value'], Bal7Constraint_total['Value'], Bal8Constraint_total['Value']], axis=1)
Total_sim.columns = ['Bal1Constraint', 'Bal2Constraint', 'Bal3Constraint', 'Bal4Constraint', 'Bal5Constraint', 'Bal6Constraint', 'Bal7Constraint', 'Bal8Constraint']
hist_hourly_2015 = pd.read_excel('hist_hourly_2015.xls', sheet_name = 'ISONE CA', usecols = ['DA_LMP'])
hist_hourly_2016 = pd.read_excel('hist_hourly_2016.xls', sheet_name = 'ISO NE CA', usecols = ['DA_LMP'])
hist_hourly_2017 = pd.read_excel('hist_hourly_2017.xlsx', sheet_name = 'ISO NE CA', usecols = ['DA_LMP'])
hist_daily_2015 = pd.read_excel('hist_daily_2015.xls', sheet_name = 'ISONE CA', usecols = ['AvgDALMP'])
hist_daily_2016 = pd.read_excel('hist_daily_2016.xlsx', sheet_name = 'ISO NE CA', usecols = ['Avg_DA_LMP'])
hist_daily_2017 = pd.read_excel('hist_daily_2017.xlsx', sheet_name = 'ISO NE CA', usecols = ['Avg_DA_LMP'])
hist_daily_2016.columns = ['AvgDALMP']
hist_daily_2017.columns = ['AvgDALMP']
hist_hourly = pd.concat([hist_hourly_2015, hist_hourly_2016, hist_hourly_2017])
hist_hourly.columns = ['LMP']
hist_hourly.reset_index(inplace=True, drop=True)
hist_daily = pd.concat([hist_daily_2015, hist_daily_2016, hist_daily_2017])
hist_daily.columns = ['LMP']
hist_daily.reset_index(inplace=True, drop=True)
for year in range(3):
hist_1 = hist_hourly.loc[year*8760:year*8760+8759, 'LMP'].copy()
hist_1.reset_index(drop=True, inplace=True)
globals()['Hourly_'+str(year)] = pd.DataFrame(hist_1).copy()
hist_hourly_final = pd.concat([Hourly_0, Hourly_1, Hourly_2])
hist_hourly_final.reset_index(inplace=True, drop=True)
for year in range(3):
hist_2 = hist_daily.loc[year*365:year*365+364, 'LMP'].copy()
hist_2.reset_index(drop=True, inplace=True)
globals()['Daily_'+str(year)] = pd.DataFrame(hist_2).copy()
hist_daily_final = | pd.concat([Daily_0, Daily_1, Daily_2]) | pandas.concat |
import os
import tempfile
import unittest
import pandas as pd
from glhe.output_processor.output_processor import OutputProcessor
class TestOutputProcessor(unittest.TestCase):
@staticmethod
def add_instance():
temp_dir = tempfile.mkdtemp()
temp_file_name = 'temp.csv'
return OutputProcessor(temp_dir, temp_file_name)
def test_collect_output(self):
tst = self.add_instance()
d = {'foo': 1,
'bar': 2}
tst.collect_output(d)
self.assertEqual(tst.df['foo'][0], 1)
self.assertEqual(tst.df['bar'][0], 2)
def test_write_to_file(self):
tst = self.add_instance()
d = {'foo': 1,
'bar': 2}
tst.collect_output(d)
tst.write_to_file()
# check that the file was written
self.assertTrue(os.path.exists(tst.write_path))
# make sure the data comes out right
df = | pd.read_csv(tst.write_path) | pandas.read_csv |
# /usr/bin/env python3
#en Pandas existen 3 tios fundamentales de datos estructurados
#las Series, DataFrames, Index
import pandas as pd
import numpy as np
def series():
data=pd.Series([12,34,56,7.8])
print(data)
data=pd.Series([12,34,565.33,23,44,122], index=["Notas","Edad","Grande","code","Gemelas","122N"])
print(data)
print(data.values)
print(data.index)
#se puede construir un objeto Series de un objeto Dict en python
def series_dict():
population_dict={'China':123000000.2,
'Colombia':2340000,
'Peru':24240000,
'Chile':769000000,
'Rusia':120000000
}
data=pd.Series(population_dict)
print(data)
print(data['China'])
data['China']+=1
print(data['China'])
#un Series acepta como parametro principal un objeto 'data' que puede ser un diccionario
#lista,tupla array Numpy
#Se pueden especificar indices explicitos para cada tipo de dato pd.Series(data,index=[something,something])
def dataframes():
#podemos crear dataframes atraves de:
#Series pd.DataFrame(serie,columns=['somthing_name'])
#2 o mas Series (exlicados aqui)
#Especializacion de diccionarios
#Matrices bidmensionales de Numpy np.DataFrame(np.random.randint(1,56,size=(2,3)),columns=['a','b','c'],index=[1,2])
population_dict={'China':120000000,
'Peru':90000,
'Chile':89098830,
'Colombia':120000,
'Argentina':1299000
}
infected_dict={
'China':890000,
'Peru':900,
'Chile':89000,
'Colombia':52009,
'Argentina':1230
}
population=pd.Series(population_dict)
infected= | pd.Series(infected_dict) | pandas.Series |
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal, assert_series_equal
from gs_quant.api.gs.data import GsDataApi
from gs_quant.context_base import ContextMeta
from gs_quant.errors import MqValueError
from gs_quant.markets import MarketDataCoordinate
from gs_quant.session import GsSession, Environment
from gs_quant.target.assets import FieldFilterMap
from gs_quant.target.coordinates import MDAPIDataQuery
from gs_quant.target.data import MarketDataVendor, DataSetEntity, DataQuery, DataSetFieldEntity
test_coordinates = (
MarketDataCoordinate(mkt_type='Prime', mkt_quoting_style='price', mkt_asset='335320934'),
MarketDataCoordinate(mkt_type='IR', mkt_asset='USD', mkt_class='Swap', mkt_point=('2Y',)),
)
test_str_coordinates = (
'Prime_335320934_.price',
'IR_USD_Swap_2Y'
)
test_defn_dict = {'id': 'EXAMPLE_FROM_SLANG',
'name': 'Example DataSet',
'description': 'This is a test.',
'shortDescription': '',
'vendor': 'Goldman Sachs',
'dataProduct': 'TEST',
'entitlements': {'query': ['internal'],
'view': ['internal', 'role:DataServiceView', 'role:DataServiceAdmin'],
'upload': ['internal'],
'admin': ['internal', 'role:DataServiceAdmin'],
'edit': ['internal', 'role:DataServiceAdmin']},
'parameters': {'methodology': '',
'coverage': '',
'notes': '',
'history': '',
'frequency': '',
'applyMarketDataEntitlements': False,
'uploadDataPolicy': 'DEFAULT_POLICY',
'logicalDb': 'STUDIO_DAILY',
'symbolStrategy': 'ARCTIC_LINK',
'immutable': False,
'includeInCatalog': False,
'coverageEnabled': True},
'dimensions': {'timeField': 'date',
'transactionTimeField': 'updateTime',
'symbolDimensions': ['assetId'],
'nonSymbolDimensions': [{'field': 'price', 'column': 'PRICE'}],
'measures': [{'field': 'updateTime', 'column': 'UPDATE_TIME'}],
'entityDimension': 'assetId'},
'defaults': {'startSeconds': 2592000.0},
'createdById': '9eb7226166a44236905cae2913cfbd3c',
'createdTime': '2018-07-24T00:32:25.77Z',
'lastUpdatedById': '4ad8ebb6480d49e6b2e9eea9210685cf',
'lastUpdatedTime': '2019-10-24T14:20:13.653Z'}
bond_data = [
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0139,
'time': pd.to_datetime('2019-01-20T01:03:00Z')
},
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0141,
'time': pd.to_datetime('2019-01-20T01:08:00Z')
}
]
swap_data = [
{
'mktType': 'IR',
'mktAsset': 'USD',
'mktClass': 'Swap',
'mktPoint': ('2Y',),
'mktQuotingStyle': 'ATMRate',
'ATMRate': 0.02592,
'time': pd.to_datetime('2019-01-20T01:09:45Z')
}
]
bond_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:03:00Z'), pd.to_datetime('2019-01-20T01:08:00Z')],
'mktType': ['Prime', 'Prime'],
'mktAsset': ['335320934', '335320934'],
'mktQuotingStyle': ['price', 'price'],
'value': [1.0139, 1.0141]
},
index=pd.DatetimeIndex(['2019-01-20T01:03:00', '2019-01-20T01:08:00']),
)
swap_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:09:45Z')],
'mktType': ['IR'],
'mktAsset': ['USD'],
'mktClass': ['Swap'],
'mktPoint': [('2Y',)],
'mktQuotingStyle': ['ATMRate'],
'value': [0.02592]
},
index=pd.DatetimeIndex(['2019-01-20T01:09:45']),
)
def test_coordinates_data(mocker):
start = dt.datetime(2019, 1, 2, 1, 0)
end = dt.datetime(2019, 1, 2, 1, 10)
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0], start=start, end=end)
assert_frame_equal(coord_data_result, bond_expected_frame)
str_coord_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates[1], start=start, end=end)
assert_frame_equal(str_coord_data_result, swap_expected_frame)
coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates, start=start, end=end,
as_multiple_dataframes=True)
assert len(coords_data_result) == 2
assert_frame_equal(coords_data_result[0], bond_expected_frame)
assert_frame_equal(coords_data_result[1], swap_expected_frame)
GsSession.current._post.reset_mock()
str_coords_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates, start=start, end=end,
as_multiple_dataframes=True)
assert len(str_coords_data_result) == 2
assert_frame_equal(str_coords_data_result[0], bond_expected_frame)
assert_frame_equal(str_coords_data_result[1], swap_expected_frame)
GsSession.current._post.assert_called_once_with('/data/coordinates/query',
payload=MDAPIDataQuery(market_data_coordinates=test_coordinates,
start_time=start,
end_time=end,
vendor=MarketDataVendor.Goldman_Sachs,
format="MessagePack")
)
def test_coordinate_data_series(mocker):
start = dt.datetime(2019, 1, 2, 1, 0)
end = dt.datetime(2019, 1, 2, 1, 10)
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
bond_expected_series = pd.Series(index=bond_expected_frame.index, data=bond_expected_frame.value.values)
swap_expected_series = | pd.Series(index=swap_expected_frame.index, data=swap_expected_frame.value.values) | pandas.Series |
#!/usr/bin/env python
from __future__ import division
import sys, os, matplotlib
import numpy as np
import readnew
from glob import glob
import matplotlib.pyplot as plt
matplotlib.rcParams['text.usetex'] = True
matplotlib.rc('font', family='serif')
matplotlib.rcParams['figure.figsize'] = (5, 4)
import yaml
import os.path
import time # Need to wait some time if file is being written
import argparse
import colors
import pandas as pd
parser = argparse.ArgumentParser(description =
"""
This script functions by reading in a .CSV file using
Pandas and plots the data.
EXAMPLE USAGE:
python3 scripts-python/ising-cv-plot.py
--file_dir=../ising-cp-data/
--save_dir=ising/data/heat-capacity
--filename ising-sad-32
--N=32
""")
parser.add_argument('--file_dir', required=True,
help='The directory where the files are located. \
Example:/home/jordan/ising-cp-data/')
# parser.add_argument('--save_dir', required=True,
# help='The directory where the data will be saved. \
# Exmaple:data/comparison/N32')
parser.add_argument('--N', type=int, required=True,
help='The length of the ising model. Used to divide \
the number of moves.')
# parser.add_argument('--filename', required=True,
# help='The filename to perform analysis on. \
# Exmaple:ising-sad-32')
parser.add_argument('--noshow', action='store_true',
help='Whether to show the figure or not')
args = parser.parse_args()
# Rename all argparse parameters.
file_dir = args.file_dir
# save_dir = args.save_dir
N = args.N
# filename = args.filename
if args.noshow==True:
matplotlib.use('Agg')
print('true?')
cv_data = pd.read_csv('%s/N%s-heat-capacity.csv' % (file_dir,N),delimiter='\t',encoding='utf-8',engine='python')
#cv_data['cvref'] = cv_data['cvref'].astype(float)
print(cv_data.head(10))
cv_headers = list(cv_data)[1:]
print(cv_headers)
# Begin plotting the heat capacity
plt.figure('heat capacity plot')
Temp = np.array( | pd.to_numeric(cv_data['Temperature'], errors='coerce') | pandas.to_numeric |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
### General imports ###
from __future__ import division
import numpy as np
import pandas as pd
import time
import re
import os
from collections import Counter
import altair as alt
### Flask imports
import requests
from flask import Flask, render_template, session, request, redirect, flash, Response
### Video imports ###
from library.video_emotion_recognition import *
### Text imports ###
from library.text_emotion_recognition import *
from library.text_preprocessor import *
from nltk import *
from tika import parser
from werkzeug.utils import secure_filename
import tempfile
# Flask config
app = Flask(__name__)
app.secret_key = b'(\<KEY>'
app.config['UPLOAD_FOLDER'] = '/Upload'
# Home page
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
################################## RULES #######################################
# Rules of the game
@app.route('/rules')
def rules():
return render_template('rules.html')
############################### VIDEO Analysis ################################
# Read the overall dataframe before the user starts to add his own data
df = pd.read_csv('static/js/db/histo.txt', sep=",")
# Video interview template
@app.route('/video', methods=['POST'])
def video() :
# Display a warning message
flash('You will have 45 seconds to discuss the topic mentioned above. Due to restrictions, we are not able to redirect you once the video is over. Please move your URL to /video_dash instead of /video_1 once over. You will be able to see your results then.')
return render_template('video.html')
# Display the video flow (face, landmarks, emotion)
@app.route('/video_1', methods=['POST'])
def video_1() :
try :
# Response is used to display a flow of information
return Response(gen(),mimetype='multipart/x-mixed-replace; boundary=frame')
#return Response(stream_template('video.html', gen()))
except :
return None
# Dashboard
@app.route('/video_dash', methods=("POST", "GET"))
def video_dash():
# Load personal history
df_2 = pd.read_csv('static/js/db/histo_perso.txt')
def emo_prop(df_2) :
return [int(100*len(df_2[df_2.density==0])/len(df_2)),
int(100*len(df_2[df_2.density==1])/len(df_2)),
int(100*len(df_2[df_2.density==2])/len(df_2)),
int(100*len(df_2[df_2.density==3])/len(df_2)),
int(100*len(df_2[df_2.density==4])/len(df_2)),
int(100*len(df_2[df_2.density==5])/len(df_2)),
int(100*len(df_2[df_2.density==6])/len(df_2))]
emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
emo_perso = {}
emo_glob = {}
for i in range(len(emotions)) :
emo_perso[emotions[i]] = len(df_2[df_2.density==i])
emo_glob[emotions[i]] = len(df[df.density==i])
df_perso = pd.DataFrame.from_dict(emo_perso, orient='index')
df_perso = df_perso.reset_index()
df_perso.columns = ['EMOTION', 'VALUE']
df_perso.to_csv('static/js/db/hist_vid_perso.txt', sep=",", index=False)
df_glob = pd.DataFrame.from_dict(emo_glob, orient='index')
df_glob = df_glob.reset_index()
df_glob.columns = ['EMOTION', 'VALUE']
df_glob.to_csv('static/js/db/hist_vid_glob.txt', sep=",", index=False)
emotion = df_2.density.mode()[0]
emotion_other = df.density.mode()[0]
def emotion_label(emotion) :
if emotion == 0 :
return "Angry"
elif emotion == 1 :
return "Disgust"
elif emotion == 2 :
return "Fear"
elif emotion == 3 :
return "Happy"
elif emotion == 4 :
return "Sad"
elif emotion == 5 :
return "Surprise"
else :
return "Neutral"
### Altair Plot
df_altair = pd.read_csv('static/js/db/prob.csv', header=None, index_col=None).reset_index()
df_altair.columns = ['Time', 'Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
angry = alt.Chart(df_altair).mark_line(color='orange', strokeWidth=2).encode(
x='Time:Q',
y='Angry:Q',
tooltip=["Angry"]
)
disgust = alt.Chart(df_altair).mark_line(color='red', strokeWidth=2).encode(
x='Time:Q',
y='Disgust:Q',
tooltip=["Disgust"])
fear = alt.Chart(df_altair).mark_line(color='green', strokeWidth=2).encode(
x='Time:Q',
y='Fear:Q',
tooltip=["Fear"])
happy = alt.Chart(df_altair).mark_line(color='blue', strokeWidth=2).encode(
x='Time:Q',
y='Happy:Q',
tooltip=["Happy"])
sad = alt.Chart(df_altair).mark_line(color='black', strokeWidth=2).encode(
x='Time:Q',
y='Sad:Q',
tooltip=["Sad"])
surprise = alt.Chart(df_altair).mark_line(color='pink', strokeWidth=2).encode(
x='Time:Q',
y='Surprise:Q',
tooltip=["Surprise"])
neutral = alt.Chart(df_altair).mark_line(color='brown', strokeWidth=2).encode(
x='Time:Q',
y='Neutral:Q',
tooltip=["Neutral"])
chart = (angry + disgust + fear + happy + sad + surprise + neutral).properties(
width=1000, height=400, title='Probability of each emotion over time')
chart.save('static/CSS/chart.html')
return render_template('video_dash.html', emo=emotion_label(emotion), emo_other = emotion_label(emotion_other), prob = emo_prop(df_2), prob_other = emo_prop(df))
############################### Text Analysis #################################
global df_text
tempdirectory = tempfile.gettempdir()
@app.route('/text', methods=['POST'])
def text() :
return render_template('text.html')
def get_personality(text):
try:
pred = predict().run(text, model_name = "Personality_traits_NN")
return pred
except KeyError:
return None
def get_text_info(text):
text = text[0]
words = wordpunct_tokenize(text)
common_words = FreqDist(words).most_common(100)
counts = Counter(words)
num_words = len(text.split())
return common_words, num_words, counts
def preprocess_text(text):
preprocessed_texts = NLTKPreprocessor().transform([text])
return preprocessed_texts
@app.route('/text_1', methods=['POST'])
def text_1():
text = request.form.get('text')
traits = ['Extraversion', 'Neuroticism', 'Agreeableness', 'Conscientiousness', 'Openness']
probas = get_personality(text)[0].tolist()
df_text = pd.read_csv('static/js/db/text.txt', sep=",")
df_new = df_text.append(pd.DataFrame([probas], columns=traits))
df_new.to_csv('static/js/db/text.txt', sep=",", index=False)
perso = {}
perso['Extraversion'] = probas[0]
perso['Neuroticism'] = probas[1]
perso['Agreeableness'] = probas[2]
perso['Conscientiousness'] = probas[3]
perso['Openness'] = probas[4]
df_text_perso = | pd.DataFrame.from_dict(perso, orient='index') | pandas.DataFrame.from_dict |
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
import transforms3d as t3d
import scipy.ndimage.filters as filters
from sklearn.base import BaseEstimator, TransformerMixin
from analysis.pymo.rotation_tools import Rotation, euler2expmap, euler2expmap2, expmap2euler, euler_reorder, unroll
from analysis.pymo.Quaternions import Quaternions
from analysis.pymo.Pivots import Pivots
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("MocapParameterizer: " + self.param_type)
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'expmap2pos':
return self._expmap_to_pos(X)
else:
raise 'param types: euler, quat, expmap, position, expmap2pos'
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
raise 'quat2euler is not supported'
elif self.param_type == 'position':
# raise 'positions 2 eulers is not supported'
print('positions 2 eulers is not supported')
return X
else:
raise 'param types: euler, quat, expmap, position'
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
rot_order = track.skeleton[joint]['order']
#print("rot_order:" + joint + " :" + rot_order)
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = np.zeros((euler_df.shape[0], 3))
rot_order = "XYZ"
else:
euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))
if pc.shape[1] < 3:
pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])
else:
pos_values =np.asarray([[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])
quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = quats#rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + np.asarray(track.skeleton[joint]['offsets'])
# multiply k to the rotmat of the parent for every frame i
q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _expmap2rot(self, expmap):
theta = np.linalg.norm(expmap, axis=1, keepdims=True)
nz = np.nonzero(theta)[0]
expmap[nz,:] = expmap[nz,:]/theta[nz]
nrows=expmap.shape[0]
x = expmap[:,0]
y = expmap[:,1]
z = expmap[:,2]
s = np.sin(theta*0.5).reshape(nrows)
c = np.cos(theta*0.5).reshape(nrows)
rotmats = np.zeros((nrows, 3, 3))
rotmats[:,0,0] = 2*(x*x-1)*s*s+1
rotmats[:,0,1] = 2*x*y*s*s-2*z*c*s
rotmats[:,0,2] = 2*x*z*s*s+2*y*c*s
rotmats[:,1,0] = 2*x*y*s*s+2*z*c*s
rotmats[:,1,1] = 2*(y*y-1)*s*s+1
rotmats[:,1,2] = 2*y*z*s*s-2*x*c*s
rotmats[:,2,0] = 2*x*z*s*s-2*y*c*s
rotmats[:,2,1] = 2*y*z*s*s+2*x*c*s
rotmats[:,2,2] = 2*(z*z-1)*s*s+1
return rotmats
def _expmap_to_pos(self, X):
'''Converts joints rotations in expmap notation to joint positions'''
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=exp_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
if 'Nub' not in joint:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
expmap = r.values
#expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()]
else:
expmap = np.zeros((exp_df.shape[0], 3))
# Convert the eulers to rotation matrices
#rotmats = np.asarray([Rotation(f, 'expmap').rotmat for f in expmap])
#angs = np.linalg.norm(expmap,axis=1, keepdims=True)
rotmats = self._expmap2rot(expmap)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
pos_values = np.zeros((exp_df.shape[0], 3))
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + track.skeleton[joint]['offsets']
# multiply k to the rotmat of the parent for every frame i
q = np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=tree_data[joint][1][:,0], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=tree_data[joint][1][:,1], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=tree_data[joint][1][:,2], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = euler_df.copy()# pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
#exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
#exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
#print(joint)
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
r1_col = '%s_%srotation'%(joint, rot_order[0])
r2_col = '%s_%srotation'%(joint, rot_order[1])
r3_col = '%s_%srotation'%(joint, rot_order[2])
exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)
euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]
#exps = [Rotation(f, 'euler', from_deg=True, order=rot_order).to_expmap() for f in euler] # Convert the eulers to exp maps
exps = unroll(np.array([euler2expmap(f, rot_order, True) for f in euler])) # Convert the exp maps to eulers
# exps = np.array([euler2expmap(f, rot_order, True) for f in euler]) # Convert the exp maps to eulers
#exps = euler2expmap2(euler, rot_order, True) # Convert the eulers to exp maps
# Create the corresponding columns in the new DataFrame
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))
#print(exp_df.columns)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
#euler_df = pd.DataFrame(index=exp_df.index)
euler_df = exp_df.copy()
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
#euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
#euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)
expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columsn are organized in xyz order
rot_order = track.skeleton[joint]['order']
#euler_rots = [Rotation(f, 'expmap').to_euler(True, rot_order) for f in expmap] # Convert the exp maps to eulers
euler_rots = [expmap2euler(f, rot_order, True) for f in expmap] # Convert the exp maps to eulers
# Create the corresponding columns in the new DataFrame
euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
new_track = track.clone()
new_track.values = euler_df
Q.append(new_track)
return Q
class Mirror(BaseEstimator, TransformerMixin):
def __init__(self, axis="X", append=True):
"""
Mirrors the data
"""
self.axis = axis
self.append = append
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("Mirror: " + self.axis)
Q = []
if self.append:
for track in X:
Q.append(track)
for track in X:
channels = []
titles = []
if self.axis == "X":
signs = np.array([1,-1,-1])
if self.axis == "Y":
signs = np.array([-1,1,-1])
if self.axis == "Z":
signs = np.array([-1,-1,1])
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)
new_df[ryp] = pd.Series(data=-signs[1]*euler_df[ryp], index=new_df.index)
new_df[rzp] = pd.Series(data=-signs[2]*euler_df[rzp], index=new_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
#lft_rots = [c for c in euler_df.columns if ('Left' in c and 'rotation' in c and 'Nub' not in c)]
#rgt_rots = [c for c in euler_df.columns if ('Right' in c and 'rotation' in c and 'Nub' not in c)]
lft_joints = (joint for joint in track.skeleton if 'Left' in joint and 'Nub' not in joint)
rgt_joints = (joint for joint in track.skeleton if 'Right' in joint and 'Nub' not in joint)
new_track = track.clone()
for lft_joint in lft_joints:
#lr = euler_df[[c for c in rots if lft_joint + "_" in c]]
#rot_order = track.skeleton[lft_joint]['order']
#lft_eulers = [[f[1]['%s_Xrotation'%lft_joint], f[1]['%s_Yrotation'%lft_joint], f[1]['%s_Zrotation'%lft_joint]] for f in lr.iterrows()]
rgt_joint = lft_joint.replace('Left', 'Right')
#rr = euler_df[[c for c in rots if rgt_joint + "_" in c]]
#rot_order = track.skeleton[rgt_joint]['order']
# rgt_eulers = [[f[1]['%s_Xrotation'%rgt_joint], f[1]['%s_Yrotation'%rgt_joint], f[1]['%s_Zrotation'%rgt_joint]] for f in rr.iterrows()]
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%lft_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%rgt_joint], index=new_df.index)
new_df['%s_Yrotation'%lft_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%rgt_joint], index=new_df.index)
new_df['%s_Zrotation'%lft_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%rgt_joint], index=new_df.index)
new_df['%s_Xrotation'%rgt_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%lft_joint], index=new_df.index)
new_df['%s_Yrotation'%rgt_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%lft_joint], index=new_df.index)
new_df['%s_Zrotation'%rgt_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%lft_joint], index=new_df.index)
# List the joints that are not left or right, i.e. are on the trunk
joints = (joint for joint in track.skeleton if 'Nub' not in joint and 'Left' not in joint and 'Right' not in joint)
for joint in joints:
#r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
#rot_order = track.skeleton[joint]['order']
#eulers = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()]
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%joint], index=new_df.index)
new_df['%s_Yrotation'%joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%joint], index=new_df.index)
new_df['%s_Zrotation'%joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%joint], index=new_df.index)
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
return X
class EulerReorder(BaseEstimator, TransformerMixin):
def __init__(self, new_order):
"""
Add a
"""
self.new_order = new_order
def fit(self, X, y=None):
self.orig_skeleton = copy.deepcopy(X[0].skeleton)
print(self.orig_skeleton)
return self
def transform(self, X, y=None):
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)
new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)
new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
new_track = track.clone()
for joint in joints:
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
euler = [[f[1]['%s_Xrotation'%(joint)], f[1]['%s_Yrotation'%(joint)], f[1]['%s_Zrotation'%(joint)]] for f in r.iterrows()]
new_euler = [euler_reorder(f, rot_order, self.new_order, True) for f in euler]
#new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)
# Create the corresponding columns in the new DataFrame
new_df['%s_%srotation'%(joint, self.new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)
new_df['%s_%srotation'%(joint, self.new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)
new_df['%s_%srotation'%(joint, self.new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)
new_track.skeleton[joint]['order'] = self.new_order
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
return X
# Q = []
#
# for track in X:
# channels = []
# titles = []
# euler_df = track.values
#
# # Create a new DataFrame to store the exponential map rep
# new_df = pd.DataFrame(index=euler_df.index)
#
# # Copy the root positions into the new DataFrame
# rxp = '%s_Xposition'%track.root_name
# ryp = '%s_Yposition'%track.root_name
# rzp = '%s_Zposition'%track.root_name
# new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)
# new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)
# new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)
#
# # List the columns that contain rotation channels
# rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
#
# # List the joints that are not end sites, i.e., have channels
# joints = (joint for joint in track.skeleton if 'Nub' not in joint)
#
# new_track = track.clone()
# for joint in joints:
# r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
# rot_order = track.skeleton[joint]['order']
# new_order = self.orig_skeleton[joint]['order']
# print("rot_order:" + str(rot_order))
# print("new_order:" + str(new_order))
#
# euler = [[f[1]['%s_%srotation'%(joint, rot_order[0])], f[1]['%s_%srotation'%(joint, rot_order[1])], f[1]['%s_%srotation'%(joint, rot_order[2])]] for f in r.iterrows()]
# #new_euler = [euler_reorder(f, rot_order, new_order, True) for f in euler]
# new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)
#
# # Create the corresponding columns in the new DataFrame
# new_df['%s_%srotation'%(joint, new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)
# new_df['%s_%srotation'%(joint, new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)
# new_df['%s_%srotation'%(joint, new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)
#
# new_track.skeleton[joint]['order'] = new_order
#
# new_track.values = new_df
# Q.append(new_track)
# return Q
class JointSelector(BaseEstimator, TransformerMixin):
'''
Allows for filtering the mocap data to include only the selected joints
'''
def __init__(self, joints, include_root=False):
self.joints = joints
self.include_root = include_root
def fit(self, X, y=None):
selected_joints = []
selected_channels = []
if self.include_root:
selected_joints.append(X[0].root_name)
selected_joints.extend(self.joints)
for joint_name in selected_joints:
selected_channels.extend([o for o in X[0].values.columns if (joint_name + "_") in o and 'Nub' not in o])
self.selected_joints = selected_joints
self.selected_channels = selected_channels
self.not_selected = X[0].values.columns.difference(selected_channels)
self.not_selected_values = {c:X[0].values[c].values[0] for c in self.not_selected}
self.orig_skeleton = X[0].skeleton
return self
def transform(self, X, y=None):
print("JointSelector")
Q = []
for track in X:
t2 = track.clone()
for key in track.skeleton.keys():
if key not in self.selected_joints:
parent = t2.skeleton[key]['parent']
if parent in t2.skeleton:
t2.skeleton[parent]['children'].remove(key)
t2.skeleton.pop(key)
t2.values = track.values[self.selected_channels]
Q.append(t2)
return Q
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
t2 = track.clone()
t2.skeleton = self.orig_skeleton
for d in self.not_selected:
t2.values[d] = self.not_selected_values[d]
Q.append(t2)
return Q
class Numpyfier(BaseEstimator, TransformerMixin):
'''
Just converts the values in a MocapData object into a numpy array
Useful for the final stage of a pipeline before training
'''
def __init__(self):
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Numpyfier")
Q = []
for track in X:
Q.append(track.values.values)
#print("Numpyfier:" + str(track.values.columns))
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
# print(self.org_mocap_.values.columns)
# import pdb;pdb.set_trace()
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
class Slicer(BaseEstimator, TransformerMixin):
'''
Slice the data into intervals of equal size
'''
def __init__(self, window_size, overlap=0.5):
self.window_size = window_size
self.overlap = overlap
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Slicer")
Q = []
for track in X:
vals = track.values.values
nframes = vals.shape[0]
overlap_frames = (int)(self.overlap*self.window_size)
n_sequences = (nframes-overlap_frames)//(self.window_size-overlap_frames)
if n_sequences>0:
y = np.zeros((n_sequences, self.window_size, vals.shape[1]))
# extract sequences from the input data
for i in range(0,n_sequences):
frameIdx = (self.window_size-overlap_frames) * i
Q.append(vals[frameIdx:frameIdx+self.window_size,:])
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
class RootTransformer(BaseEstimator, TransformerMixin):
def __init__(self, method, position_smoothing=0, rotation_smoothing=0):
"""
Accepted methods:
abdolute_translation_deltas
pos_rot_deltas
"""
self.method = method
self.position_smoothing=position_smoothing
self.rotation_smoothing=rotation_smoothing
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("RootTransformer")
Q = []
for track in X:
if self.method == 'abdolute_translation_deltas':
new_df = track.values.copy()
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
x=track.values[xpcol].copy()
z=track.values[zpcol].copy()
if self.position_smoothing>0:
x_sm = filters.gaussian_filter1d(x, self.position_smoothing, axis=0, mode='nearest')
z_sm = filters.gaussian_filter1d(z, self.position_smoothing, axis=0, mode='nearest')
dx = pd.Series(data=x_sm, index=new_df.index).diff()
dz = pd.Series(data=z_sm, index=new_df.index).diff()
new_df[xpcol] = x-x_sm
new_df[zpcol] = z-z_sm
else:
dx = x.diff()
dz = z.diff()
new_df.drop([xpcol, zpcol], axis=1, inplace=True)
dx[0] = dx[1]
dz[0] = dz[1]
new_df[dxpcol] = dx
new_df[dzpcol] = dz
new_track = track.clone()
new_track.values = new_df
# end of abdolute_translation_deltas
elif self.method == 'pos_rot_deltas':
new_track = track.clone()
# Absolute columns
xp_col = '%s_Xposition'%track.root_name
yp_col = '%s_Yposition'%track.root_name
zp_col = '%s_Zposition'%track.root_name
#rot_order = track.skeleton[track.root_name]['order']
#%(joint, rot_order[0])
rot_order = track.skeleton[track.root_name]['order']
r1_col = '%s_%srotation'%(track.root_name, rot_order[0])
r2_col = '%s_%srotation'%(track.root_name, rot_order[1])
r3_col = '%s_%srotation'%(track.root_name, rot_order[2])
# Delta columns
dxp_col = '%s_dXposition'%track.root_name
dzp_col = '%s_dZposition'%track.root_name
dxr_col = '%s_dXrotation'%track.root_name
dyr_col = '%s_dYrotation'%track.root_name
dzr_col = '%s_dZrotation'%track.root_name
positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))
rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))
""" Get Trajectory and smooth it"""
trajectory_filterwidth = self.position_smoothing
reference = positions.copy()*np.array([1,0,1])
if trajectory_filterwidth>0:
reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')
""" Get Root Velocity """
velocity = np.diff(reference, axis=0)
velocity = np.vstack((velocity[0,:], velocity))
""" Remove Root Translation """
positions = positions-reference
""" Get Forward Direction along the x-z plane, assuming character is facig z-forward """
#forward = [Rotation(f, 'euler', from_deg=True, order=rot_order).rotmat[:,2] for f in rotations] # get the z-axis of the rotation matrix, assuming character is facig z-forward
#print("order:" + rot_order.lower())
quats = Quaternions.from_euler(rotations, order=rot_order.lower(), world=False)
forward = quats*np.array([[0,0,1]])
forward[:,1] = 0
""" Smooth Forward Direction """
direction_filterwidth = self.rotation_smoothing
if direction_filterwidth>0:
forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')
forward = forward / np.sqrt((forward**2).sum(axis=-1))[...,np.newaxis]
""" Remove Y Rotation """
target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
rotation = Quaternions.between(target, forward)[:,np.newaxis]
positions = (-rotation[:,0]) * positions
new_rotations = (-rotation[:,0]) * quats
velocity = (-rotation[:,0]) * velocity
""" Get Root Rotation """
#print(rotation[:,0])
rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1]).ps
rvelocity = np.vstack((rvelocity[0], rvelocity))
eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in new_rotations])*180.0/np.pi
new_df = track.values.copy()
root_pos_x = pd.Series(data=positions[:,0], index=new_df.index)
root_pos_y = pd.Series(data=positions[:,1], index=new_df.index)
root_pos_z = pd.Series(data=positions[:,2], index=new_df.index)
root_pos_x_diff = pd.Series(data=velocity[:,0], index=new_df.index)
root_pos_z_diff = pd.Series(data=velocity[:,2], index=new_df.index)
root_rot_1 = pd.Series(data=eulers[:,0], index=new_df.index)
root_rot_2 = pd.Series(data=eulers[:,1], index=new_df.index)
root_rot_3 = pd.Series(data=eulers[:,2], index=new_df.index)
root_rot_y_diff = | pd.Series(data=rvelocity[:,0], index=new_df.index) | pandas.Series |
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data.csv')
W = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_W_reference.csv', header = None)
# Creating a reference list of nations
nations = list(data.Country.unique())
# Prepping data for pollution regression
# Data sets for individual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year', 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year', 'ln_ch4_intensity_lag']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year', 'ln_nox_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = pd.get_dummies(nox_data['Year'])
# Replacing Country and Year with fixed effects
co2_data = pd.concat([co2_data, co2_national_dummies, co2_year_dummies], axis = 1)
ch4_data = pd.concat([ch4_data, ch4_national_dummies, ch4_year_dummies], axis = 1)
nox_data = pd.concat([nox_data, nox_national_dummies, nox_year_dummies], axis = 1)
co2_data = co2_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
ch4_data = ch4_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
nox_data = nox_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
# Create the Y and X matrices
CO2 = co2_data['ln_co2']
CH4 = ch4_data['ln_ch4']
NOX = nox_data['ln_nox']
X_CO2 = co2_data.drop(['ln_co2'], axis = 1)
X_CH4 = ch4_data.drop(['ln_ch4'], axis = 1)
X_NOX = nox_data.drop(['ln_nox'], axis = 1)
# Running pollution regressions
co2_mod = stats.OLS(CO2, X_CO2)
ch4_mod = stats.OLS(CH4, X_CH4)
nox_mod = stats.OLS(NOX, X_NOX)
models = [co2_mod, ch4_mod, nox_mod]
names = ['CO2', 'CH4', 'NOx']
res_list = []
for mod in models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/' + names[models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab.txt')
# After running the conditional convergence models, we set up the network effects models
# Compute technology growth rate
# \widetilde{g} = \left(\frac{1}{T}\right)\sum\limits_{t=1}^{T}\left(\frac{\eta_{t}}{t-\gamma(t-1)}\right)
g_co2 = (1/23) * sum([(co2_mod.fit().params[i] / ((i-1971) - (co2_mod.fit().params['ln_co2_lag'] * (i-1972)))) for i in range(1972,2015)])
g_ch4 = (1/21) * sum([(ch4_mod.fit().params[i] / ((i-1971) - (ch4_mod.fit().params['ln_ch4_lag'] * (i-1972)))) for i in range(1972,2013)])
g_nox = (1/21) * sum([(nox_mod.fit().params[i] / ((i-1971) - (nox_mod.fit().params['ln_nox_lag'] * (i-1972)))) for i in range(1972,2013)])
# Add technology parameters to the dataframe
co2_tech = []
ch4_tech = []
nox_tech = []
for i in range(len(data)):
if data.Year[i] > 1970 and data.Country[i] in co2_mod.fit().params.keys():
co2_tech.append(co2_mod.fit().params[data.Country[i]] + (g_co2 * (data.Year[i] - 1971)))
else:
co2_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in ch4_mod.fit().params.keys():
ch4_tech.append(ch4_mod.fit().params[data.Country[i]] + (g_ch4 * (data.Year[i] - 1971)))
else:
ch4_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in nox_mod.fit().params.keys():
nox_tech.append(nox_mod.fit().params[data.Country[i]] + (g_nox * (data.Year[i] - 1971)))
else:
nox_tech.append('')
# Add technology values to data set
co2_tech = pd.Series(co2_tech, name = 'co2_tech')
ch4_tech = pd.Series(co2_tech, name = 'ch4_tech')
nox_tech = pd.Series(co2_tech, name = 'nox_tech')
data = pd.concat([data, co2_tech, ch4_tech, nox_tech], axis = 1)
# Convert '' to np.nan to use pandas dropna
data[data[['co2_tech', 'ch4_tech', 'nox_tech']] == ''] = np.nan
# Data prep for network effects regressions for intensities
tc_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'TC_CO2_ROB', 'Country', 'Year']].dropna()
tc_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'TC_CH4_ROB', 'Country', 'Year']].dropna()
tc_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'TC_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = | pd.get_dummies(tc_co2_rob['Country']) | pandas.get_dummies |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=0 -> roll forward. Mon
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearBegin(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1),}))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEndLagged(unittest.TestCase):
def test_bad_month_fail(self):
self.assertRaises(Exception, BYearEnd, month=13)
self.assertRaises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)},
))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)},
))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
self.assertEqual(baseDate + dateOffset, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
self.assertEqual(offset.rollforward(date), datetime(2010, 6, 30))
self.assertEqual(offset.rollback(date), datetime(2009, 6, 30))
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31),}))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),}))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31),}))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def testOnOffset():
tests = [#(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
#(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def assertEq(dateOffset, baseDate, expected):
actual = dateOffset + baseDate
assert actual == expected
def test_Hour():
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert (Hour(3) + Hour(2)) == Hour(5)
assert (Hour(3) - Hour(2)) == Hour()
def test_Minute():
assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assert (Minute(3) + Minute(2)) == Minute(5)
assert (Minute(3) - Minute(2)) == | Minute() | pandas.core.datetools.Minute |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to datframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scaler
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Because there is no true guarantee which one will be first,
# If they don't match, make sure that the `freq` is the same at least.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scaler
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scaler
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> accross all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
try:
pd.DataFrame().select_dtypes()
assert False
except ValueError:
assert True
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [(i - length / 2) % length for i in range(length)]
pandas_df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [
np.nan if i % 2 == 0 else modin_df.index[i] for i in range(length)
]
pandas_df.index = [
np.nan if i % 2 == 0 else pandas_df.index[i] for i in range(length)
]
else:
length = len(modin_df.columns)
modin_df.columns = [
np.nan if i % 2 == 0 else modin_df.columns[i] for i in range(length)
]
pandas_df.columns = [
np.nan if i % 2 == 0 else pandas_df.columns[i] for i in range(length)
]
modin_result = modin_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
pandas_result = pandas_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
pandas_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
df_equals(modin_df_cp, pandas_df_cp)
# MultiIndex
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pd.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(modin_df))]
)
pandas_df.index = pandas.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(pandas_df))]
)
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(level=0), pandas_df.sort_index(level=0))
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(axis=0), pandas_df.sort_index(axis=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(self, request, data, axis, ascending, na_position):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name and (
(axis == 0 or axis == "over rows")
or name_contains(request.node.name, numeric_dfs)
):
index = (
modin_df.index if axis == 1 or axis == "columns" else modin_df.columns
)
key = index[0]
modin_result = modin_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
keys = [key, index[-1]]
modin_result = modin_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
def test_squeeze(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
frame_data_2 = {"col1": [0, 1, 2, 3]}
frame_data_3 = {
"col1": [0],
"col2": [4],
"col3": [8],
"col4": [12],
"col5": [0],
}
frame_data_4 = {"col1": [2]}
frame_data_5 = {"col1": ["string"]}
# Different data for different cases
pandas_df = pandas.DataFrame(frame_data).squeeze()
ray_df = pd.DataFrame(frame_data).squeeze()
df_equals(ray_df, pandas_df)
pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
ray_df_2 = pd.DataFrame(frame_data_2).squeeze()
df_equals(ray_df_2, pandas_df_2)
pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
ray_df_3 = pd.DataFrame(frame_data_3).squeeze()
df_equals(ray_df_3, pandas_df_3)
pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
ray_df_4 = pd.DataFrame(frame_data_4).squeeze()
df_equals(ray_df_4, pandas_df_4)
pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
ray_df_5 = pd.DataFrame(frame_data_5).squeeze()
df_equals(ray_df_5, pandas_df_5)
data = [
[
pd.Timestamp("2019-01-02"),
pd.Timestamp("2019-01-03"),
pd.Timestamp("2019-01-04"),
pd.Timestamp("2019-01-05"),
],
[1, 1, 1, 2],
]
df = pd.DataFrame(data, index=["date", "value"]).T
pf = pandas.DataFrame(data, index=["date", "value"]).T
df.set_index("date", inplace=True)
pf.set_index("date", inplace=True)
df_equals(df.iloc[0], pf.iloc[0])
def test_stack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).stack()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_std(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_style(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).style
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_sum(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sum_single_column(self, data):
modin_df = pd.DataFrame(data).iloc[:, [0]]
pandas_df = pandas.DataFrame(data).iloc[:, [0]]
df_equals(modin_df.sum(), pandas_df.sum())
df_equals(modin_df.sum(axis=1), pandas_df.sum(axis=1))
def test_swapaxes(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).swapaxes(0, 1)
def test_swaplevel(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.swaplevel("Number", "Color")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(self, data, n):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.tail(n), pandas_df.tail(n))
df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))
def test_take(self):
df = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
with pytest.warns(UserWarning):
df.take([0, 3])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_records(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import mygene
import pandas as pd
import scipy
import scipy.stats as ss
from tqdm import tqdm
def compute_bulk_correlations(
ad,
bulk_expr_path,
mapping_file_path,
imputed_obsm_key=None,
rpkm_prefix="900",
tf_list=None,
):
ad.var_names_make_unique()
# Read the mapping file which maps Ensembl gene IDs to symbol IDs
mapping_df = pd.read_csv(mapping_file_path)
mapping_df.index = mapping_df["EnsemblId"]
mapping_df = mapping_df.drop_duplicates(subset="SymbolId")
# Read the bulk-expression data and set index
bulk_expr_df = pd.read_csv(bulk_expr_path)
bulk_expr_df.index = bulk_expr_df["name"]
# Get the bulk expression values and gene IDs for all mapped genes
bulk_expr_vals = bulk_expr_df.loc[mapping_df.index][f"rpkm_{rpkm_prefix}"]
bulk_expr_genes = mapping_df.loc[mapping_df.index]["SymbolId"]
bulk_expr_vals.index = bulk_expr_genes
# Compute the set of genes which are common in bulk and scRNA data
sc_expr_genes = ad.var_names
if tf_list is not None:
common_genes = list(
set(sc_expr_genes).intersection(set(bulk_expr_genes)).intersection(tf_list)
)
else:
common_genes = list(set(sc_expr_genes).intersection(set(bulk_expr_genes)))
# Compute the correlation of the expression of each cell with the bulk expr data
# using the set of common genes computed above
p = []
common_bulk_expr_val = bulk_expr_vals.loc[common_genes]
if imputed_obsm_key is not None:
ad_ = ad.obsm[imputed_obsm_key]
else:
ad_ = ad.X
if isinstance(ad_, scipy.sparse.csr_matrix):
ad_ = ad_.todense()
ad_df = | pd.DataFrame(ad_, index=ad.obs_names, columns=ad.var_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import pathlib
import os
import json
import subprocess
import glob
import torch
import nltk
nltk.download('punkt')
import sys
root_path = 'DataX15/Final Project/'
sys.path.append(root_path)
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def create_credentials(username, password, root_path='DataX15/Final Project/'):
credentials = {"username": username, "password": password}
jsonString = json.dumps(credentials)
jsonFile = open(root_path + "run-to-update/secret.json", "w")
jsonFile.write(jsonString)
jsonFile.close()
def update_reviews_studied_companies(root_path='DataX15/Final Project/'):
companies = pd.read_csv(root_path + 'datasets/studied_companies.csv', sep = ';')
for company in companies.Company:
# Get company info
url = companies[companies['Company'] == company].URL.values[0]
last_date = companies[companies['Company'] == company]['Latest review'].values[0]
company_file = | pd.read_csv(root_path + f'datasets/brut-datasets/{company}_reviews.csv', sep = ';') | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
import os.path as op
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import OneHotEncoder
sys.path.append('src')
from models import KernelClassifier
# Define parameter names (AUs) and target label (EMOTIONS)
PARAM_NAMES = np.loadtxt('data/au_names_new.txt', dtype=str).tolist()
EMOTIONS = np.array(['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise'])
ohe = OneHotEncoder(sparse=False)
ohe.fit(EMOTIONS[:, np.newaxis])
# Define analysis parameters
beta = 1
kernel = 'cosine'
ktype = 'similarity'
scores_all = []
# Loop across mappings (Darwin, Ekman, etc.)
mappings = ['Cordaro2018IPC', 'Cordaro2018ref', 'Darwin', 'Ekman', 'Keltner2019', 'Matsumoto2008',
# 'JackSchyns_ethn-WC_sub-train_trial-train',
# 'JackSchyns_ethn-EA_sub-train_trial-train',
# 'JackSchyns_ethn-all_sub-train_trial-train',
]
files = sorted(glob('data/ratings/*/*.tsv'))
mega_df = pd.concat([pd.read_csv(f, sep='\t', index_col=0) for f in files], axis=0)
mega_df = mega_df.query("sub_split == 'train' & trial_split == 'train'")
mega_df = mega_df.query("emotion != 'other'") # remove non-emo trials
mega_df = mega_df.loc[mega_df.index != 'empty', :] # remove trials w/o AUs
for mapp_name in mappings:
# Initialize model!
model = KernelClassifier(au_cfg=None, param_names=None, kernel=kernel, ktype=ktype,
binarize_X=False, normalization='softmax', beta=beta)
# Note that there is no "fitting" of the model! The mappings themselves
# can be interpreted as already-fitted models
model.add_Z( | pd.read_csv(f'data/{mapp_name}.tsv', sep='\t', index_col=0) | pandas.read_csv |
"""
demand calculation
"""
import geopandas as gpd
import pandas as pd
from shapely import wkt
import numpy as np
from _utils import *
from _geometry_utils import *
import time
from _file_import_demcalc import *
# supress warnings
pd.options.mode.chained_assignment = None # default='warn'
# def calc_demand(timestamp, sp_dem):
timestamp = time.strftime("%Y%m%d-%H%M%S")
# collecting traffic counts
tc_output = retrieve_tc_data_for_all_segments_for_dir(
segments_gdf, tc_gdf, asfinag_data, vehicle_type="Kfz <= 3,5t hzG"
)
tc_gdf["tc_dir_0"] = tc_output.dir_0.to_list()
tc_gdf["tc_dir_1"] = tc_output.dir_1.to_list()
# producing pois_df for all segments
# poi (=point of interest) encompass all points along the segments at which traffic counts estimates are to be estimated
# for further demand estimation
pois_df = | pd.DataFrame() | pandas.DataFrame |
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import pandas as pd
import json
from datetime import datetime
def send_email(receiver_email, appointment_details, user_name):
sender_email = "email"
password = "password"
message = MIMEMultipart("alternative")
message["Subject"] = "Vaccine slot available"
message["From"] = sender_email
message["To"] = receiver_email
# Create the plain-text and HTML version of your message
text = """\
Hi,
Please find below vaccine slot availablity for you,
slot available at 4:00 PM 06/05/2021 at Jahangir Hospital, Pune
click here to navigate to the hostpital"""
html = f"""\
<html>
<body>
<p>Hi {user_name},<br>
Please find below vaccine slot availablity for you,<br>
{appointment_details.to_html()}<br>
<a href="https://www.cowin.gov.in/home">Click here to book the appointment</a>
</p>
<p>Please reply back to us on this email if you dont want to receive the updates anymore.</p>
<p>Regards,<br>
Team Dobby<br>
</p>
</body>
</html>
"""
# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)
# Create secure connection with server and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message.as_string())
return True
def send_email_wrapper(receiver_email: str, appointment_details: str, name: str):
try:
df_appointment = | pd.DataFrame(appointment_details) | pandas.DataFrame |
"""
GAM datasets
"""
# -*- coding: utf-8 -*-
from os.path import dirname
import pandas as pd
import numpy as np
from pygam.utils import make_2d
PATH = dirname(__file__)
def _clean_X_y(X, y):
"""ensure that X and y data are float and correct shapes
"""
return make_2d(X, verbose=False).astype('float'), y.astype('float')
def mcycle(return_X_y=True):
"""motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
"""
# y is real
# recommend LinearGAM
motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
if return_X_y:
X = motor.times.values
y = motor.accel
return _clean_X_y(X, y)
return motor
def coal(return_X_y=True):
"""coal-mining accidents dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 150 bins has been computed describing the number accidents per year.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html
"""
# y is counts
# recommend PoissonGAM
coal = pd.read_csv(PATH + '/coal.csv', index_col=0)
if return_X_y:
y, x = np.histogram(coal.values, bins=150)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return coal
def faithful(return_X_y=True):
"""old-faithful dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 200 bins has been computed describing the wating time between eruptions.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/faithful.html
"""
# y is counts
# recommend PoissonGAM
faithful = pd.read_csv(PATH + '/faithful.csv', index_col=0)
if return_X_y:
y, x = np.histogram(faithful['eruptions'], bins=200)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return faithful
def wage(return_X_y=True):
"""wage dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the year, age and education of each sampled person.
The education category has been transformed to integers.
y contains the wage.
Source:
https://github.com/JWarmenhoven/ISLR-python/blob/master/Notebooks/Data/Wage.csv
"""
# y is real
# recommend LinearGAM
wage = pd.read_csv(PATH + '/wage.csv', index_col=0)
if return_X_y:
X = wage[['year', 'age', 'education']].values
X[:,-1] = np.unique(X[:,-1], return_inverse=True)[1]
y = wage['wage'].values
return _clean_X_y(X, y)
return wage
def trees(return_X_y=True):
"""cherry trees dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the girth and the height of each tree.
y contains the volume.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html
"""
# y is real.
# recommend InvGaussGAM, or GAM(distribution='gamma', link='log')
trees = pd.read_csv(PATH + '/trees.csv', index_col=0)
if return_X_y:
y = trees.Volume.values
X = trees[['Girth', 'Height']].values
return _clean_X_y(X, y)
return trees
def default(return_X_y=True):
"""credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
"""
# y is binary
# recommend LogisticGAM
default = pd.read_csv(PATH + '/default.csv', index_col=0)
if return_X_y:
default = default.values
default[:,0] = np.unique(default[:,0], return_inverse=True)[1]
default[:,1] = np.unique(default[:,1], return_inverse=True)[1]
X = default[:,1:]
y = default[:,0]
return _clean_X_y(X, y)
return default
def cake(return_X_y=True):
"""cake dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of recipe used transformed to an integer,
the catergory of replicate, and the temperatue.
y contains the angle at which the cake broke.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/lme4/cake.html
"""
# y is real
# recommend LinearGAM
cake = | pd.read_csv(PATH + '/cake.csv', index_col=0) | pandas.read_csv |
import streamlit as st
import urllib
import json
import pandas as pd
import numpy as np
from pathlib import Path
from coronavirus.db_utils.db_utils import DataBase
from coronavirus.preprocessor.preprocessor import consolidate_country_regions
def get_totals():
"""Displays total deaths, confirmed, and recovered"""
db = DataBase('COVID-19.db')
confirmed_df = db.read_table_to_dataframe('jh_global_confirmed',
'confirmed')
deaths_df = db.read_table_to_dataframe('jh_global_deaths',
'deaths')
recovered_df = db.read_table_to_dataframe('jh_global_recovered',
'recovered')
confirmed_df = consolidate_country_regions(confirmed_df)
deaths_df = consolidate_country_regions(deaths_df)
recovered_df = consolidate_country_regions(recovered_df)
confirmed_df = get_most_recent_numbers(confirmed_df)
deaths_df = get_most_recent_numbers(deaths_df)
recovered_df = get_most_recent_numbers(recovered_df)
confirmed_total = confirmed_df['confirmed'].sum()
deaths_total = deaths_df['deaths'].sum()
recovered_total = recovered_df['recovered'].sum()
return confirmed_total, deaths_total, recovered_total
def get_most_recent_numbers(df):
"""Returns most recent data"""
return df.loc[df['date'] == df['date'].max()]
def string_of_spaces(n):
"""
Creates a string of html spaces
:param n {int}: number of spaces
"""
return " " * n
# TODO: get date of x number of cases reached
def get_date_of_x_cases_reached(df, x):
"""
Determines the date hit n number of cases were reached
:param df: pandas df
:param x {int}: number of cases
"""
pass
# TODO: create column of days since x number of cases reached
def add_column_date_of_x_cases_reached(df, x):
"""
create column of days since x number of cases reached
:param df: pandas df
:param x {int}: number of cases
"""
pass
# TODO: create column of cases each day
def add_column_cases_per_day(df, response, name):
"""
Create column of number of cases since previous day
:param df: pandas df sorted by date in ascending
:param reponse {str}: the response column to calculate rate
:param name {str}: new column name
"""
# Sort by ascending so the inevitable NaN of first row is the first day
# not the current day
rate_df = df.sort_values(by='date', ascending=True)
# TODO: make groupby 'country/region' 'state/province' agnostic
# TODO: probably wrap this in a class
def calculate_rate(x):
return x - x.shift(1)
# Select response, groupby country, calculate rate, transform back to df
rate_df[name] = (rate_df.groupby(['country/region'])[response]
.transform(calculate_rate))
rate_df = rate_df.reindex(columns=['country/region', response,
name, 'date'])
return rate_df.sort_values(by='date', ascending=False)
def _max_width_():
"""Workaround in html to not having a 'Wide Mode()' setting"""
max_width_str = f"max-width: 2000px;"
st.markdown(f"""
<style>
.reportview-container .main .block-container{{
{max_width_str} }}
</style>
""",
unsafe_allow_html=True,
)
def add_ISO2_country_codes(df):
"""Adds ISO2 country codes to dataframe"""
link = "http://country.io/names.json"
f = urllib.request.urlopen(link)
country_json = f.read().decode("utf-8")
country_ISO2 = json.loads(country_json)
country_ISO2_df = pd.DataFrame(country_ISO2.items(), columns=['ISO2 Code', 'country/region'])
return pd.merge(df, country_ISO2_df, on='country/region', how='inner')
# df.head()
def add_ISO3_country_codes(df):
"""Adds ISO3 country codes to dataframe"""
link = "http://country.io/iso3.json"
f = urllib.request.urlopen(link)
country_json = f.read().decode("utf-8")
country_ISO3 = json.loads(country_json)
country_ISO3_df = pd.DataFrame(country_ISO3.items(), columns=['ISO2 Code', 'ISO3 Code'])
return pd.merge(df, country_ISO3_df, on='ISO2 Code', how='inner')
# df.head()
def add_population_density(df):
"""Joins population density from 2018"""
db = DataBase('COVID-19.db')
population_df = db.load_population_density_df()
merged_df = df.merge(population_df[['Country Code', '2018']],
how='left',
left_on=['ISO3 Code'],
right_on=['Country Code'])
merged_df['pop_density_per_sq_km'] = merged_df['2018']
merged_df = merged_df.drop(columns=['Country Code', '2018'])
return merged_df
def add_country_population(df):
"""Joins population density from 2018"""
db = DataBase('COVID-19.db')
population_df = db.load_population_df()
merged_df = df.merge(population_df[['Country Code', '2018']],
how='left',
left_on=['ISO3 Code'],
right_on=['Country Code'])
merged_df['population'] = merged_df['2018']
merged_df = merged_df.drop(columns=['Country Code', '2018'])
return merged_df
def add_google_mobility_data(df):
"""Join Google mobility data"""
# db = DataBase('COVID-19.db')
# google_df = db.pull_google_mobility_data()
# link = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
link = Path.cwd() / 'data/Global_Mobility_Report.csv'
google_df = pd.read_csv(link)
google_df['date'] = pd.to_datetime(google_df['date'])
merged_df = df.merge(google_df,
how='left',
left_on=['ISO2 Code', 'date'],
right_on=['country_region_code', 'date'])
return merged_df
# TODO: Rolling average
def rolling_mean(df, num_days):
"""
Window average on response. Smooths variations due to problems with
reporting.
df: a pandas series for the response
return:
"""
n = num_days - 1
response = df.to_numpy()
avg_response = response.copy()
for i in range(1, len(response)):
if i <= num_days:
print(f'i = {i}')
print(f'n = {n}')
print(f'num_days = {num_days}')
print(f'num_days-n = {num_days - n}')
avg_response[i] = round(response[0:num_days-n].mean())
print(f'avg = {avg_response[i]}')
print()
n -= 1
else:
print(f'i = {i}')
print(f'i-num_days = {i-num_days}')
print(f'response[i-n:i] = response[i-num_days:i]')
avg_response[i] = round(response[i-num_days:i].mean())
print(f'avg = {avg_response[i]}')
print()
return | pd.Series(avg_response) | pandas.Series |
import pandas as pd
import numpy as np
def text_vectorize_and_cluster(text, df=None, vectorizer=None, clusterer=None,
vector_params=None, clusterer_params=None,
outlier_scores=False, one_hot_labels=False, return_df=False,
return_type='clusters'):
""" Given processed text, vectorize and cluster it. Return cluster labels or cluster labels
along with fitted vectorizer and clusterer.
Parameters
----------
text : object
Object which contains text that will be passed to the transformer's .fit_transform() method
As such, text must already be processed and in correct format.
df : Pandas DataFrame
Optional dataframe attach clustering results to
vectorizer: object
Class for text vectorization. Must follow sklearn transformer convention and
implement .fit_transform() method
E.g. CountVectorizer from sklearn
vector_params: dict[str:obj]
Dictionary to pass to vectorizer as parameters
clusterer: object
Class for clustering. Must follow sklearn estimator convention and
implement .fit_predict() method for implementing cluster assignment
clusterer_params: dict[str:obj]
Dictionary to pass to clusterer as parameters
outlier_scores: boolean
Flag to indicate outlier scores computed by clusterer. Accessed
from clusterer.outlier_scores_ attribute
one_hot_labels: boolean
Flag to indicate if cluster labels should be one hot encoded
instead of returns as a one dimensional array of ordinal
integer labels
return_df: boolean
Flag to indicate if results should be returned concatenated
with the dataframe passed to 'df' kword arg
return_type: str in ['clusters', 'all', ]
String indicating return type. Must be on of ['clusters', 'all', 'df']
clusters: Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
all: Return the fitted vectorizer, clusterer and cluster label results
Returns
-------
clusters: pd.Series or pd.DataFrame
Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
clusters, vectorizer, clusterer: object, object, pd.Series or pd.DataFrame
Return the fitted vectorizer, clusterer and cluster label results
"""
# Check vectorizer and clusterer for correct methods
assert "fit_transform" in dir(vectorizer), "vectorizer has no 'fit_transform' method"
assert "fit_predict" in dir(clusterer), "clusterer has no 'fit_predict' method"
if return_df:
assert isinstance(df, pd.DataFrame), "If specifying 'return_df', data must be passed to argument 'df'"
# Instantiate vectorizer with params if specified
if vector_params:
vectorizer = vectorizer(**vector_params)
# Else instantiate the vectorizer
elif vectorizer:
vectorizer = vectorizer()
# Fit and trasnform text to vectors
vectors = vectorizer.fit_transform(text)
# Instantiate vectorizer with params if specified
if clusterer_params:
clusterer = clusterer(**clusterer_params)
elif clusterer:
clusterer = clusterer()
# Fit and trasnform vectors to clusters
cluster_labels = clusterer.fit_predict(vectors)
if len(set(clusterer.labels_)) <= 1:
return print('Clusterer could not find any meaningful labels. All data would fall under one cluster')
# Create DataFrame of Cluster Labels
results = | pd.DataFrame(cluster_labels, columns=['Cluster_Label']) | pandas.DataFrame |
import os, sys
import pandas as pd
import numpy as np
import xarray as xr
import glob
import io
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams.update({'font.size': 16})
opj = os.path.join
dir = '/DATA/projet/borges'
dirdata = opj(dir, 'data')
aerosols = ['fine', 'coarse']
aerosol = aerosols[1]
method = 'osoaa_' + aerosol
odir = opj(dirdata, 'L2', method)
files = glob.glob(opj(odir, 'awr_L2_*.csv'))
rho = pd.DataFrame()
for file in files:
# test
df = | pd.read_csv(file, header=[0, 1], index_col=0, parse_dates=True) | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### COVID-19 Global Stats
# This notebook lets you examine and plot the data shared throught the JHU CSSE git repo
#
# git clone https://github.com/CSSEGISandData/COVID-19.git
#
# The git repo provides country-by-country (and province by province, for some countries) daily stats for confirmed COVID-19 cases, and deaths.
#
# This notebook pulls just the country-level data out of the repo. An accompanying notebook does the same for the US state-by-state stats.
import pandas as pd
import numpy as np
from pathlib import Path
# %autosave 0
# You will need to set 'repo' to the absolute pathname for the COVID-19 repo, on your system.
repo = Path.home() / "data-stuff/COVID-19"
data_dir = Path(repo) / "csse_covid_19_data/csse_covid_19_time_series"
confirmed = pd.read_csv(data_dir / "time_series_covid19_confirmed_global.csv")
deaths = | pd.read_csv(data_dir / "time_series_covid19_deaths_global.csv") | pandas.read_csv |
#! /usr/local/bin/python
# ! -*- encoding:utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import random
import os
def generate_gt(clusters, dataset):
cci_labels_gt_path = '{}/mouse_small_intestine_1189_cci_labels_gt_{}_{}.csv'
cci_labels_junk_path = '{}/mouse_small_intestine_1189_cci_labels_junk_{}_{}.csv'
data_path = 'mouse_small_intestine_1189_data.csv'
type_path = 'mouse_small_intestine_1189_cellcluster.csv'
cci_path = 'mouse_small_intestine_1189_cluster_cluster_interaction_combined.csv'
ligand_receptor_pair_path = 'mouse_ligand_receptor_pair.csv'
# prepare data and cell2type
df = pd.read_csv(data_path, index_col=0) # (gene, cell)
genes = set(df.index.tolist())
df = df.fillna(0)
df = df.transpose(copy=True) # (cell, gene)
df['id'] = range(0, len(df)) # add cell id
df['id'].astype(int)
cell2type = pd.read_csv(type_path, index_col=0)
cell2type.columns = ['cell', 'type']
assert cell2type['cell'].tolist() == df.index.tolist()
df['type'] = cell2type['type'].tolist()
# prepare cell cell interaction
cci = | pd.read_csv(cci_path, header=0, index_col=0) | pandas.read_csv |
"""Module contains test cases for layers.py module."""
import glob
import os
import unittest
import navis
import neuroglancer as ng
import pandas as pd
from pyroglancer.layers import _handle_ngdimensions
from pyroglancer.layers import create_nglayer
from pyroglancer.layers import get_ngserver
from pyroglancer.localserver import closedataserver
from pyroglancer.localserver import startdataserver
from pyroglancer.ngviewer import closeviewer
from pyroglancer.ngviewer import openviewer
import pytest
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add a common viewer, dataserver(specific port for travis) for each test module..
closeviewer()
closedataserver()
startdataserver(port=8007) # start dataserver..
openviewer(headless=True) # open ngviewer
# def setup_module(module):
# """Start all servers."""
# # Add a common viewer, dataserver for the whole serie of test..
# startdataserver() # start dataserver..
# openviewer(headless=True) # open ngviewer
#
#
# def teardown_module(module):
# """Stop all servers."""
# # Stop all viewers..
# closedataserver()
# closeviewer()
class Testlayers(unittest.TestCase):
"""Test pyroglancer.layers."""
# def setUp(self):
# """Perform set up."""
# super(Testsynapses, self).setUp()
#
# def tearDown(self):
# """Perform tearing down."""
# super(Testsynapses, self).tearDown()
def test_create_ngsegmentlayer(self):
"""Check if the segdataset seg_20190805 is created."""
layer_serverdir, layer_host = get_ngserver()
ngviewer = openviewer(None)
ngviewer2 = create_nglayer(ngviewer=ngviewer,
layer_kws={'type': 'segdataset', 'ngspace': 'FAFB', 'name': 'seg_20190805'})
assert ngviewer2 == ngviewer
def test_create_ngbuhmannsynapselayer(self):
"""Check if the buhmann synapse layer is created."""
layer_serverdir, layer_host = get_ngserver()
ngviewer = openviewer(None)
ngviewer2 = create_nglayer(ngviewer=ngviewer,
layer_kws={'type': 'synapticlayer', 'ngspace': 'FAFB',
'name': 'synapses_buhmann2019'})
assert ngviewer2 == ngviewer
def test_create_ngsynapticcleftslayer(self):
"""Check if the synaptic clefts layer is created."""
layer_serverdir, layer_host = get_ngserver()
ngviewer = openviewer(None)
ngviewer2 = create_nglayer(ngviewer=ngviewer,
layer_kws={'type': 'synapticclefts', 'ngspace': 'FAFB',
'name': 'clefts_Heinrich_etal'})
assert ngviewer2 == ngviewer
def test_create_ngtreeneuronlist(self):
"""Check if create layer works in a tree neuronlist."""
# load some example neurons..
swc_path = os.path.join(BASE_DIR, 'data/swc')
# print('swc_path: ', swc_path)
swc_files = glob.glob(os.path.join(swc_path, '*.swc'))
# print('swc_file: ', swc_files)
neuronlist = []
neuronlist += [navis.read_swc(f, units='8 nm', connector_labels={'presynapse': 7, 'postsynapse': 8},
id=int(os.path.splitext(os.path.basename(f))[0])) for f in swc_files]
neuronlist = navis.core.NeuronList(neuronlist)
ngviewer = openviewer(None)
ngviewer2 = create_nglayer(layer_kws={'type': 'skeletons', 'source': neuronlist, 'ngspace': 'FAFB',
'color': ['white', 'green', 'grey', 'yellow', 'magenta'],
'alpha': 0.9})
assert ngviewer2 == ngviewer
def test_create_ngpointslayer(self):
"""Check if the points layer is created."""
layer_serverdir, layer_host = get_ngserver()
ngviewer = openviewer(None)
location_data = [{'x': 5, 'y': 10, 'z': 20}, {'x': 15, 'y': 25, 'z': 30}]
points = | pd.DataFrame(location_data) | pandas.DataFrame |
import math
import os
from typing import List, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
import sketch.sketch_gen as board_sketch
import storyboard.board_gen as board_gen
# import storyboard.board_query as board_query
import storyboard.query_cy as board_query
import linear_board
def gen_workload(
granularity: int,
seed: int = 0,
num_queries: int = 100,
query_lens=None,
):
if query_lens is None:
query_lens = [
2**i for i in range(int(math.log2(granularity)))
]
workload = []
r = np.random.RandomState(seed)
for cur_query_len in query_lens:
start_idxs = r.randint(0, granularity - cur_query_len, size=num_queries)
workload.extend([(start_idx, start_idx + cur_query_len) for start_idx in start_idxs])
return workload
def run_workload(
workload: List[Tuple],
x_to_track: np.ndarray,
true_board: pd.DataFrame,
est_board: pd.DataFrame,
totals_df: pd.DataFrame,
sketch_name: str,
quantile: bool,
accumulator_size: int = 0,
):
results = []
dyadic_base = -1
if "dyadic" in sketch_name:
dyadic_base = linear_board.get_dyadic_base(sketch_name)
for start_idx, end_idx in tqdm(workload):
true_counts = board_query.query_linear(
true_board, seg_start=start_idx, seg_end=end_idx, x_to_track=x_to_track,
quantile=quantile, dyadic_base=-1)
if accumulator_size == 0:
est_counts = board_query.query_linear(
est_board, seg_start=start_idx, seg_end=end_idx, x_to_track=x_to_track,
quantile=quantile, dyadic_base=dyadic_base)
else:
if quantile:
est_counts = board_query.query_linear_acc_quant(
est_board, seg_start=start_idx, seg_end=end_idx, x_to_track=x_to_track,
acc_size=accumulator_size,
)
else:
est_counts = board_query.query_linear_mg(
est_board, seg_start=start_idx, seg_end=end_idx, x_to_track=x_to_track,
acc_size=accumulator_size,
)
true_tot = board_query.query_linear_tot(totals_df, start_idx, end_idx)
cur_results = board_query.calc_errors(true_counts, est_counts)
# print(true_counts)
# print(est_counts)
cur_results["start_idx"] = start_idx
cur_results["end_idx"] = end_idx
cur_results["query_len"] = end_idx - start_idx
cur_results["sketch"] = sketch_name
cur_results["total"] = true_tot
cur_results["acc_size"] = accumulator_size
results.append(cur_results)
return results
def calc_results(
workload: List,
data_name: str,
granularity: int,
sketch_name: str,
sketch_size: int,
baseline_size: int,
quantile: bool,
accumulator_size: int = 0,
):
if quantile:
true_sketch= "q_top_values"
else:
true_sketch = "top_values"
x_to_track = linear_board.get_tracked(data_name)
true_file = linear_board.get_file_name(
data_name=data_name,
granularity=granularity,
sketch_name=true_sketch,
sketch_size=baseline_size,
)
sketch_file = linear_board.get_file_name(
data_name=data_name,
granularity=granularity,
sketch_name=sketch_name,
sketch_size=sketch_size,
)
true_board = pd.read_pickle(true_file)
totals_df = pd.read_csv(
linear_board.get_totals_name(data_name, granularity=granularity)
)
print("Estimating: {}".format(sketch_name))
cur_board = | pd.read_pickle(sketch_file) | pandas.read_pickle |
import os
import collections
import unittest
import pytest
import pytz
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import pvlib
from .context import capdata as pvc
data = np.arange(0, 1300, 54.167)
index = pd.date_range(start='1/1/2017', freq='H', periods=24)
df = pd.DataFrame(data=data, index=index, columns=['poa'])
# capdata = pvc.CapData('capdata')
# capdata.df = df
"""
Run all tests from project root:
'python -m tests.test_CapData'
Run individual tests:
'python -m unittest tests.test_CapData.Class.Method'
-m flag imports unittest as module rather than running as script
Run tests using pytest use the following from project root.
To run a class of tests
pytest tests/test_CapData.py::TestCapDataEmpty
To run a specific test:
pytest tests/test_CapData.py::TestCapDataEmpty::test_capdata_empty
"""
test_files = ['test1.csv', 'test2.csv', 'test3.CSV', 'test4.txt',
'pvsyst.csv', 'pvsyst_data.csv']
class TestUpdateSummary:
"""Test the update_summary wrapper and functions used within."""
def test_round_kwarg_floats(self):
"""Tests round kwarg_floats."""
kwarg_dict = {'ref_val': 763.4536140499999, 't1': 2, 'inplace': True}
rounded_kwarg_dict_3 = {'ref_val': 763.454, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict) == rounded_kwarg_dict_3
rounded_kwarg_dict_4 = {'ref_val': 763.4536, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict, 4) == rounded_kwarg_dict_4
def test_tstamp_kwarg_to_strings(self):
"""Tests coversion of kwarg values from timestamp to strings."""
start_datetime = pd.to_datetime('10/10/1990 00:00')
kwarg_dict = {'start': start_datetime, 't1': 2}
kwarg_dict_str_dates = {'start': '1990-10-10 00:00', 't1': 2}
assert pvc.tstamp_kwarg_to_strings(kwarg_dict) == kwarg_dict_str_dates
class TestTopLevelFuncs(unittest.TestCase):
def test_perc_wrap(self):
"""Test percent wrap function."""
rng = np.arange(1, 100, 1)
rng_cpy = rng.copy()
df = pd.DataFrame({'vals': rng})
df_cpy = df.copy()
bool_array = []
for val in rng:
np_perc = np.percentile(rng, val, interpolation='nearest')
wrap_perc = df.agg(pvc.perc_wrap(val)).values[0]
bool_array.append(np_perc == wrap_perc)
self.assertTrue(all(bool_array),
'np.percentile wrapper gives different value than np perc')
self.assertTrue(all(df == df_cpy), 'perc_wrap function modified input df')
def test_filter_irr(self):
rng = np.arange(0, 1000)
df = pd.DataFrame(np.array([rng, rng+100, rng+200]).T,
columns = ['weather_station irr poa W/m^2',
'col_1', 'col_2'])
df_flt = pvc.filter_irr(df, 'weather_station irr poa W/m^2', 50, 100)
self.assertEqual(df_flt.shape[0], 51,
'Incorrect number of rows returned from filter.')
self.assertEqual(df_flt.shape[1], 3,
'Incorrect number of columns returned from filter.')
self.assertEqual(df_flt.columns[0], 'weather_station irr poa W/m^2',
'Filter column name inadverdently modified by method.')
self.assertEqual(df_flt.iloc[0, 0], 50,
'Minimum value in returned data in filter column is'
'not equal to low argument.')
self.assertEqual(df_flt.iloc[-1, 0], 100,
'Maximum value in returned data in filter column is'
'not equal to high argument.')
def test_fit_model(self):
"""
Test fit model func which wraps statsmodels ols.fit for dataframe.
"""
rng = np.random.RandomState(1)
x = 50 * abs(rng.rand(50))
y = 2 * x - 5 + 5 * rng.randn(50)
df = pd.DataFrame({'x': x, 'y': y})
fml = 'y ~ x - 1'
passed_ind_vars = fml.split('~')[1].split()[::2]
try:
passed_ind_vars.remove('1')
except ValueError:
pass
reg = pvc.fit_model(df, fml=fml)
for var in passed_ind_vars:
self.assertIn(var, reg.params.index,
'{} ind variable in formula argument not in model'
'parameters'.format(var))
def test_predict(self):
x = np.arange(0, 50)
y1 = x
y2 = x * 2
y3 = x * 10
dfs = [pd.DataFrame({'x': x, 'y': y1}),
pd.DataFrame({'x': x, 'y': y2}),
pd.DataFrame({'x': x, 'y': y3})]
reg_lst = []
for df in dfs:
reg_lst.append(pvc.fit_model(df, fml='y ~ x'))
reg_ser = pd.Series(reg_lst)
for regs in [reg_lst, reg_ser]:
preds = pvc.predict(regs, pd.DataFrame({'x': [10, 10, 10]}))
self.assertAlmostEqual(preds.iloc[0], 10, 7, 'Pred for x = y wrong.')
self.assertAlmostEqual(preds.iloc[1], 20, 7, 'Pred for x = y * 2 wrong.')
self.assertAlmostEqual(preds.iloc[2], 100, 7, 'Pred for x = y * 10 wrong.')
self.assertEqual(3, preds.shape[0], 'Each of the three input'
'regressions should have a'
'prediction')
def test_pred_summary(self):
"""Test aggregation of reporting conditions and predicted results."""
"""
grpby -> df of regressions
regs -> series of predicted values
df of reg parameters
"""
pvsyst = pvc.CapData('pvsyst')
pvsyst.load_data(path='./tests/data/', load_pvsyst=True)
df_regs = pvsyst.data.loc[:, ['E_Grid', 'GlobInc', 'TAmb', 'WindVel']]
df_regs_day = df_regs.query('GlobInc > 0')
grps = df_regs_day.groupby(pd.Grouper(freq='M', label='right'))
ones = np.ones(12)
irr_rc = ones * 500
temp_rc = ones * 20
w_vel = ones
rcs = pd.DataFrame({'GlobInc': irr_rc, 'TAmb': temp_rc, 'WindVel': w_vel})
results = pvc.pred_summary(grps, rcs, 0.05,
fml='E_Grid ~ GlobInc +'
'I(GlobInc * GlobInc) +'
'I(GlobInc * TAmb) +'
'I(GlobInc * WindVel) - 1')
self.assertEqual(results.shape[0], 12, 'Not all months in results.')
self.assertEqual(results.shape[1], 10, 'Not all cols in results.')
self.assertIsInstance(results.index,
pd.core.indexes.datetimes.DatetimeIndex,
'Index is not pandas DatetimeIndex')
col_length = len(results.columns.values)
col_set_length = len(set(results.columns.values))
self.assertEqual(col_set_length, col_length,
'There is a duplicate column name in the results df.')
pt_qty_exp = [341, 330, 392, 390, 403, 406,
456, 386, 390, 346, 331, 341]
gaur_cap_exp = [3089550.4039329495, 3103610.4635679387,
3107035.251399103, 3090681.1145782764,
3058186.270209293, 3059784.2309170915,
3088294.50827525, 3087081.0026879036,
3075251.990424683, 3093287.331878834,
3097089.7852036236, 3084318.093294242]
for i, mnth in enumerate(results.index):
self.assertLess(results.loc[mnth, 'guaranteedCap'],
results.loc[mnth, 'PredCap'],
'Gauranteed capacity is greater than predicted in'
'month {}'.format(mnth))
self.assertGreater(results.loc[mnth, 'guaranteedCap'], 0,
'Gauranteed capacity is less than 0 in'
'month {}'.format(mnth))
self.assertAlmostEqual(results.loc[mnth, 'guaranteedCap'],
gaur_cap_exp[i], 7,
'Gauranted capacity not equal to expected'
'value in {}'.format(mnth))
self.assertEqual(results.loc[mnth, 'pt_qty'], pt_qty_exp[i],
'Point quantity not equal to expected values in'
'{}'.format(mnth))
def test_perc_bounds_perc(self):
bounds = pvc.perc_bounds(20)
self.assertEqual(bounds[0], 0.8,
'{} for 20 perc is not 0.8'.format(bounds[0]))
self.assertEqual(bounds[1], 1.2,
'{} for 20 perc is not 1.2'.format(bounds[1]))
def test_perc_bounds_tuple(self):
bounds = pvc.perc_bounds((15, 40))
self.assertEqual(bounds[0], 0.85,
'{} for 15 perc is not 0.85'.format(bounds[0]))
self.assertEqual(bounds[1], 1.4,
'{} for 40 perc is not 1.4'.format(bounds[1]))
def test_filter_grps(self):
pvsyst = pvc.CapData('pvsyst')
pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
pvsyst.filter_irr(200, 800)
pvsyst.rep_cond(freq='MS')
grps = pvsyst.data_filtered.groupby(pd.Grouper(freq='MS', label='left'))
poa_col = pvsyst.column_groups[pvsyst.regression_cols['poa']][0]
grps_flt = pvc.filter_grps(grps, pvsyst.rc, poa_col, 0.8, 1.2)
self.assertIsInstance(grps_flt,
pd.core.groupby.generic.DataFrameGroupBy,
'Returned object is not a dataframe groupby.')
self.assertEqual(grps.ngroups, grps_flt.ngroups,
'Returned groubpy does not have the same number of\
groups as passed groupby.')
cnts_before_flt = grps.count()[poa_col]
cnts_after_flt = grps_flt.count()[poa_col]
less_than = all(cnts_after_flt < cnts_before_flt)
self.assertTrue(less_than, 'Points were not removed for each group.')
def test_perc_difference(self):
result = pvc.perc_difference(9, 10)
self.assertAlmostEqual(result, 0.105263158)
result = pvc.perc_difference(10, 9)
self.assertAlmostEqual(result, 0.105263158)
result = pvc.perc_difference(10, 10)
self.assertAlmostEqual(result, 0)
result = pvc.perc_difference(0, 0)
self.assertAlmostEqual(result, 0)
def test_check_all_perc_diff_comb(self):
ser = pd.Series([10.1, 10.2])
val = pvc.check_all_perc_diff_comb(ser, 0.05)
self.assertTrue(val,
'Failed on two values within 5 percent.')
ser = pd.Series([10.1, 10.2, 10.15, 10.22, 10.19])
val = pvc.check_all_perc_diff_comb(ser, 0.05)
self.assertTrue(val,
'Failed with 5 values within 5 percent.')
ser = pd.Series([10.1, 10.2, 3])
val = pvc.check_all_perc_diff_comb(ser, 0.05)
self.assertFalse(val,
'Returned True for value outside of 5 percent.')
def test_sensor_filter_three_cols(self):
rng = np.zeros(10)
df = pd.DataFrame({'a':rng, 'b':rng, 'c':rng})
df['a'] = df['a'] + 4.1
df['b'] = df['b'] + 4
df['c'] = df['c'] + 4.2
df.iloc[0, 0] = 1200
df.iloc[4, 1] = 100
df.iloc[7, 2] = 150
ix = pvc.sensor_filter(df, 0.05)
self.assertEqual(ix.shape[0], 7,
'Filter should have droppe three rows.')
def test_sensor_filter_one_col(self):
rng = np.zeros(10)
df = pd.DataFrame({'a':rng})
df['a'] = df['a'] + 4.1
df.iloc[0, 0] = 1200
ix = pvc.sensor_filter(df, 0.05)
self.assertEqual(ix.shape[0], 10,
'Should be no filtering for single column df.')
def test_determine_pass_or_fail(self):
'Tolerance band around 100%'
self.assertTrue(pvc.determine_pass_or_fail(.96, '+/- 4', 100)[0],
'Should pass, cp ratio equals bottom of tolerance.')
self.assertTrue(pvc.determine_pass_or_fail(.97, '+/- 4', 100)[0],
'Should pass, cp ratio above bottom of tolerance.')
self.assertTrue(pvc.determine_pass_or_fail(1.03, '+/- 4', 100)[0],
'Should pass, cp ratio below top of tolerance.')
self.assertTrue(pvc.determine_pass_or_fail(1.04, '+/- 4', 100)[0],
'Should pass, cp ratio equals top of tolerance.')
self.assertFalse(pvc.determine_pass_or_fail(.959, '+/- 4', 100)[0],
'Should fail, cp ratio below bottom of tolerance.')
self.assertFalse(pvc.determine_pass_or_fail(1.041, '+/- 4', 100)[0],
'Should fail, cp ratio above top of tolerance.')
'Tolerance below 100%'
self.assertTrue(pvc.determine_pass_or_fail(0.96, '- 4', 100)[0],
'Should pass, cp ratio equals bottom of tolerance.')
self.assertTrue(pvc.determine_pass_or_fail(.97, '- 4', 100)[0],
'Should pass, cp ratio above bottom of tolerance.')
self.assertTrue(pvc.determine_pass_or_fail(1.04, '- 4', 100)[0],
'Should pass, cp ratio above bottom of tolerance.')
self.assertFalse(pvc.determine_pass_or_fail(.959, '- 4', 100)[0],
'Should fail, cp ratio below bottom of tolerance.')
'warn on incorrect tolerance spec'
with self.assertWarns(UserWarning):
pvc.determine_pass_or_fail(1.04, '+ 4', 100)
@pytest.fixture(autouse=True)
def _pass_fixtures(self, capsys):
self.capsys = capsys
def test_print_results_pass(self):
"""
This test uses the pytest autouse fixture defined above to
capture the print to stdout and test it, so it must be run
using pytest 'pytest tests/
test_CapData.py::TestTopLevelFuncs::test_print_results_pass'
"""
test_passed = (True, '950, 1050')
pvc.print_results(test_passed, 1000, 970, 0.97, 970, test_passed[1])
captured = self.capsys.readouterr()
results_str = ('Capacity Test Result: PASS\n'
'Modeled test output: 1000.000\n'
'Actual test output: 970.000\n'
'Tested output ratio: 0.970\n'
'Tested Capacity: 970.000\n'
'Bounds: 950, 1050\n\n\n')
self.assertEqual(results_str, captured.out)
def test_print_results_fail(self):
"""
This test uses the pytest autouse fixture defined above to
capture the print to stdout and test it, so it must be run
using pytest 'pytest tests/
test_CapData.py::TestTopLevelFuncs::test_print_results_pass'
"""
test_passed = (False, '950, 1050')
pvc.print_results(test_passed, 1000, 940, 0.94, 940, test_passed[1])
captured = self.capsys.readouterr()
results_str = ('Capacity Test Result: FAIL\n'
'Modeled test output: 1000.000\n'
'Actual test output: 940.000\n'
'Tested output ratio: 0.940\n'
'Tested Capacity: 940.000\n'
'Bounds: 950, 1050\n\n\n')
self.assertEqual(results_str, captured.out)
class TestLoadDataMethods(unittest.TestCase):
"""Test for load data methods without setup."""
def test_load_pvsyst(self):
pvsyst = pvc.CapData('pvsyst')
pvsyst = pvsyst.load_pvsyst('./tests/data/',
'pvsyst_example_HourlyRes_2.CSV')
self.assertEqual(8760, pvsyst.shape[0],
'Not the correct number of rows in imported data.')
self.assertIsInstance(pvsyst.index,
pd.core.indexes.datetimes.DatetimeIndex,
'Index is not a datetime index.')
self.assertIsInstance(pvsyst.columns,
pd.core.indexes.base.Index,
'Columns might be MultiIndex; should be base index')
def test_source_alsoenergy(self):
das_1 = pvc.CapData('das_1')
das_1.load_data(path='./tests/data/col_naming_examples/',
fname='ae_site1.csv', source='AlsoEnergy')
col_names1 = ['Elkor Production Meter PowerFactor, ',
'Elkor Production Meter KW, kW',
'Weather Station 1 TempF, °F', 'Weather Station 2 Sun2, W/m²',
'Weather Station 1 Sun, W/m²', 'Weather Station 1 WindSpeed, mph',
'index']
self.assertTrue(all(das_1.data.columns == col_names1),
'Column names are not expected value for ae_site1')
das_2 = pvc.CapData('das_2')
das_2.load_data(path='./tests/data/col_naming_examples/',
fname='ae_site2.csv', source='AlsoEnergy')
col_names2 = ['Acuvim II Meter PowerFactor, PF', 'Acuvim II Meter KW, kW',
'Weather Station 1 TempF, °F', 'Weather Station 3 TempF, °F',
'Weather Station 2 Sun2, W/m²', 'Weather Station 4 Sun2, W/m²',
'Weather Station 1 Sun, W/m²', 'Weather Station 3 Sun, W/m²',
'Weather Station 1 WindSpeed, mph',
'Weather Station 3 WindSpeed, mph',
'index']
self.assertTrue(all(das_2.data.columns == col_names2),
'Column names are not expected value for ae_site1')
def test_load_das(self):
das = pvc.CapData('das')
das = das.load_das('./tests/data/',
'example_meas_data.csv')
self.assertEqual(1440, das.shape[0],
'Not the correct number of rows in imported data.')
self.assertIsInstance(das.index,
pd.core.indexes.datetimes.DatetimeIndex,
'Index is not a datetime index.')
self.assertIsInstance(das.columns,
pd.core.indexes.base.Index,
'Columns might be MultiIndex; should be base index')
class TestCapDataLoadMethods(unittest.TestCase):
"""Tests for load_data method."""
def setUp(self):
os.mkdir('test_csvs')
for fname in test_files:
with open('test_csvs/' + fname, 'a') as f:
f.write('Date, val\n11/21/2017, 1')
self.capdata = pvc.CapData('capdata')
self.capdata.load_data(path='test_csvs/', group_columns=False)
def tearDown(self):
for fname in test_files:
os.remove('test_csvs/' + fname)
os.rmdir('test_csvs')
def test_read_csvs(self):
self.assertEqual(self.capdata.data.shape[0], 3,
'imported a non csv or pvsyst file')
class TestCapDataEmpty:
"""Tests of CapData empty method."""
def test_capdata_empty(self):
"""Test that an empty CapData object returns True."""
empty_cd = pvc.CapData('empty')
assert empty_cd.empty()
def test_capdata_not_empty(self):
"""Test that an CapData object with data returns False."""
cd_with_data = pvc.CapData('with_data')
cd_with_data.load_data(path='tests/data/',
fname='example_meas_data.csv',
group_columns=False)
assert not cd_with_data.empty()
class TestCapDataSeriesTypes(unittest.TestCase):
"""Test CapData private methods assignment of type to each series of data."""
def setUp(self):
self.cdata = pvc.CapData('cdata')
def test_series_type(self):
name = 'weather station 1 weather station 1 ghi poa w/m2'
test_series = pd.Series(np.arange(0, 900, 100), name=name)
out = self.cdata._CapData__series_type(test_series, pvc.type_defs)
self.assertIsInstance(out, str,
'Returned object is not a string.')
self.assertEqual(out, 'irr',
'Returned object is not "irr".')
def test_series_type_caps_in_type_def(self):
name = 'weather station 1 weather station 1 ghi poa w/m2'
test_series = pd.Series(np.arange(0, 900, 100), name=name)
type_def = collections.OrderedDict([
('irr', [['IRRADIANCE', 'IRR', 'PLANE OF ARRAY', 'POA',
'GHI', 'GLOBAL', 'GLOB', 'W/M^2', 'W/M2', 'W/M',
'W/'],
(-10, 1500)])])
out = self.cdata._CapData__series_type(test_series, type_def)
self.assertIsInstance(out, str,
'Returned object is not a string.')
self.assertEqual(out, 'irr',
'Returned object is not "irr".')
def test_series_type_repeatable(self):
name = 'weather station 1 weather station 1 ghi poa w/m2'
test_series = pd.Series(np.arange(0, 900, 100), name=name)
out = []
i = 0
while i < 100:
out.append(self.cdata._CapData__series_type(test_series, pvc.type_defs))
i += 1
out_np = np.array(out)
self.assertTrue(all(out_np == 'irr'),
'Result is not consistent after repeated runs.')
def test_series_type_valErr(self):
name = 'weather station 1 weather station 1 ghi poa w/m2'
test_series = pd.Series(name=name)
out = self.cdata._CapData__series_type(test_series, pvc.type_defs)
self.assertIsInstance(out, str,
'Returned object is not a string.')
self.assertEqual(out, 'irr',
'Returned object is not "irr".')
def test_series_type_no_str(self):
name = 'should not return key string'
test_series = pd.Series(name=name)
out = self.cdata._CapData__series_type(test_series, pvc.type_defs)
self.assertIsInstance(out, str,
'Returned object is not a string.')
self.assertIs(out, '',
'Returned object is not empty string.')
class Test_CapData_methods_sim(unittest.TestCase):
"""Test for top level irr_rc_balanced function."""
def setUp(self):
self.pvsyst = pvc.CapData('pvsyst')
self.pvsyst.load_data(path='./tests/data/', load_pvsyst=True)
# self.jun = self.pvsyst.data.loc['06/1990']
# self.jun_cpy = self.jun.copy()
# self.low = 0.5
# self.high = 1.5
# (self.irr_RC, self.jun_flt) = pvc.irr_rc_balanced(self.jun, self.low,
# self.high)
# self.jun_filter_irr = self.jun_flt['GlobInc']
def test_copy(self):
self.pvsyst.set_regression_cols(power='real_pwr--', poa='irr-ghi-',
t_amb='temp-amb-', w_vel='wind--')
pvsyst_copy = self.pvsyst.copy()
df_equality = pvsyst_copy.data.equals(self.pvsyst.data)
self.assertTrue(df_equality,
'Dataframe of copy not equal to original')
self.assertEqual(pvsyst_copy.column_groups, self.pvsyst.column_groups,
'Column groups dict of copy is not equal to original')
self.assertEqual(pvsyst_copy.trans_keys, self.pvsyst.trans_keys,
'Column groups keys are not equal to original.')
self.assertEqual(pvsyst_copy.regression_cols, self.pvsyst.regression_cols,
'Regression trans dict copy is not equal to orig.')
def test_irr_rc_balanced(self):
jun = self.pvsyst.data.loc['06/1990']
jun_cpy = jun.copy()
low = 0.5
high = 1.5
(irr_RC, jun_flt) = pvc.irr_rc_balanced(jun, low, high)
jun_filter_irr = jun_flt['GlobInc']
self.assertTrue(all(jun_flt.columns == jun.columns),
'Columns of input df missing in filtered ouput df.')
self.assertGreater(jun_flt.shape[0], 0,
'Returned df has no rows')
self.assertLess(jun_flt.shape[0], jun.shape[0],
'No rows removed from filtered df.')
self.assertTrue(jun.equals(jun_cpy),
'Input dataframe modified by function.')
self.assertGreater(irr_RC, jun[jun['GlobInc'] > 0]['GlobInc'].min(),
'Reporting irr not greater than min irr in input data')
self.assertLess(irr_RC, jun['GlobInc'].max(),
'Reporting irr no less than max irr in input data')
pts_below_irr = jun_filter_irr[jun_filter_irr.between(0, irr_RC)].shape[0]
perc_below = pts_below_irr / jun_filter_irr.shape[0]
self.assertLess(perc_below, 0.6,
'More than 60 percent of points below reporting irr')
self.assertGreaterEqual(perc_below, 0.5,
'Less than 50 percent of points below rep irr')
pts_above_irr = jun_filter_irr[jun_filter_irr.between(irr_RC, 1500)].shape[0]
perc_above = pts_above_irr / jun_filter_irr.shape[0]
self.assertGreater(perc_above, 0.4,
'Less than 40 percent of points above reporting irr')
self.assertLessEqual(perc_above, 0.5,
'More than 50 percent of points above reportin irr')
def test_filter_pvsyst_default(self):
self.pvsyst.filter_pvsyst()
self.assertEqual(self.pvsyst.data_filtered.shape[0], 8670,
'Data should contain 8670 points after removing any\
of IL Pmin, IL Pmax, IL Vmin, IL Vmax that are\
greater than zero.')
def test_filter_pvsyst_not_inplace(self):
df = self.pvsyst.filter_pvsyst(inplace=False)
self.assertIsInstance(df, pd.core.frame.DataFrame,
'Did not return DataFrame object.')
self.assertEqual(df.shape[0], 8670,
'Data should contain 8670 points after removing any\
of IL Pmin, IL Pmax, IL Vmin, IL Vmax that are\
greater than zero.')
def test_filter_pvsyst_missing_column(self):
self.pvsyst.drop_cols('IL Pmin')
self.pvsyst.filter_pvsyst()
def test_filter_pvsyst_missing_all_columns(self):
self.pvsyst.drop_cols(['IL Pmin', 'IL Vmin', 'IL Pmax', 'IL Vmax'])
self.pvsyst.filter_pvsyst()
def test_filter_shade_default(self):
self.pvsyst.filter_shade()
self.assertEqual(self.pvsyst.data_filtered.shape[0], 8645,
'Data should contain 8645 time periods\
without shade.')
def test_filter_shade_default_not_inplace(self):
df = self.pvsyst.filter_shade(inplace=False)
self.assertIsInstance(df, pd.core.frame.DataFrame,
'Did not return DataFrame object.')
self.assertEqual(df.shape[0], 8645,
'Returned dataframe should contain 8645 time periods\
without shade.')
def test_filter_shade_query(self):
# create PVsyst ShdLoss type values for testing query string
self.pvsyst.data.loc[self.pvsyst.data['FShdBm'] == 1.0, 'ShdLoss'] = 0
is_shaded = self.pvsyst.data['ShdLoss'].isna()
shdloss_values = 1 / self.pvsyst.data.loc[is_shaded, 'FShdBm'] * 100
self.pvsyst.data.loc[is_shaded, 'ShdLoss'] = shdloss_values
self.pvsyst.data_filtered = self.pvsyst.data.copy()
self.pvsyst.filter_shade(query_str='ShdLoss<=125')
self.assertEqual(self.pvsyst.data_filtered.shape[0], 8671,
'Filtered data should contain have 8671 periods with\
shade losses less than 125.')
class Test_pvlib_loc_sys(unittest.TestCase):
""" Test function wrapping pvlib get_clearsky method of Location."""
def test_pvlib_location(self):
loc = {'latitude': 30.274583,
'longitude': -97.740352,
'altitude': 500,
'tz': 'America/Chicago'}
loc_obj = pvc.pvlib_location(loc)
self.assertIsInstance(loc_obj,
pvlib.location.Location,
'Did not return instance of\
pvlib Location')
def test_pvlib_system(self):
fixed_sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
tracker_sys1 = {'axis_tilt': 0, 'axis_azimuth': 0,
'max_angle': 90, 'backtrack': True,
'gcr': 0.2, 'albedo': 0.2}
tracker_sys2 = {'max_angle': 52, 'gcr': 0.3}
fx_sys = pvc.pvlib_system(fixed_sys)
trck_sys1 = pvc.pvlib_system(tracker_sys1)
trck_sys2 = pvc.pvlib_system(tracker_sys1)
self.assertIsInstance(fx_sys,
pvlib.pvsystem.PVSystem,
'Did not return instance of\
pvlib PVSystem')
self.assertIsInstance(trck_sys1,
pvlib.tracking.SingleAxisTracker,
'Did not return instance of\
pvlib SingleAxisTracker')
self.assertIsInstance(trck_sys2,
pvlib.tracking.SingleAxisTracker,
'Did not return instance of\
pvlib SingleAxisTracker')
# possible assertions for method returning ghi
# self.assertIsInstance(ghi,
# pd.core.series.Series,
# 'Second returned object is not an instance of\
# pandas Series.')
# self.assertEqual(ghi.name, 'ghi',
# 'Series data returned is not named ghi')
# self.assertEqual(ghi.shape[0], df.shape[0],
# 'Returned ghi does not have the same number of rows\
# as the passed dataframe.')
# self.assertEqual(df.index.tz, ghi.index.tz,
# 'Returned series index has different timezone from\
# passed dataframe.')
class Test_csky(unittest.TestCase):
"""Test clear sky function which returns pvlib ghi and poa clear sky."""
def setUp(self):
self.loc = {'latitude': 30.274583,
'longitude': -97.740352,
'altitude': 500,
'tz': 'America/Chicago'}
self.sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
self.meas = pvc.CapData('meas')
self.df = self.meas.load_das('./tests/data/', 'example_meas_data.csv')
def test_get_tz_index_df(self):
"""Test that get_tz_index function returns a datetime index\
with a timezone when passed a dataframe without a timezone."""
# reindex test dataset to cover DST in the fall and spring
ix_3days = pd.date_range(start='11/3/2018', periods=864, freq='5min',
tz='America/Chicago')
ix_2days = pd.date_range(start='3/9/2019', periods=576, freq='5min',
tz='America/Chicago')
ix_dst = ix_3days.append(ix_2days)
ix_dst = ix_dst.tz_localize(None)
self.df.index = ix_dst
self.tz_ix = pvc.get_tz_index(self.df, self.loc)
self.assertIsInstance(self.tz_ix,
pd.core.indexes.datetimes.DatetimeIndex,
'Returned object is not a pandas DatetimeIndex.')
self.assertEqual(self.tz_ix.tz,
pytz.timezone(self.loc['tz']),
'Returned index does not have same timezone as\
the passed location dictionary.')
def test_get_tz_index_df_tz(self):
"""Test that get_tz_index function returns a datetime index\
with a timezone when passed a dataframe with a timezone."""
# reindex test dataset to cover DST in the fall and spring
ix_3days = pd.date_range(start='11/3/2018', periods=864, freq='5min',
tz='America/Chicago')
ix_2days = pd.date_range(start='3/9/2019', periods=576, freq='5min',
tz='America/Chicago')
ix_dst = ix_3days.append(ix_2days)
self.df.index = ix_dst
self.tz_ix = pvc.get_tz_index(self.df, self.loc)
self.assertIsInstance(self.tz_ix,
pd.core.indexes.datetimes.DatetimeIndex,
'Returned object is not a pandas DatetimeIndex.')
self.assertEqual(self.tz_ix.tz,
pytz.timezone(self.loc['tz']),
'Returned index does not have same timezone as\
the passed location dictionary.')
def test_get_tz_index_df_tz_warn(self):
"""Test that get_tz_index function returns warns when datetime index\
of dataframe does not match loc dic timezone."""
# reindex test dataset to cover DST in the fall and spring
ix_3days = pd.date_range(start='11/3/2018', periods=864, freq='5min',
tz='America/New_York')
ix_2days = pd.date_range(start='3/9/2019', periods=576, freq='5min',
tz='America/New_York')
ix_dst = ix_3days.append(ix_2days)
self.df.index = ix_dst
with self.assertWarns(UserWarning):
self.tz_ix = pvc.get_tz_index(self.df, self.loc)
def test_get_tz_index_ix_tz(self):
"""Test that get_tz_index function returns a datetime index
with a timezone when passed a datetime index with a timezone."""
self.ix = pd.date_range(start='1/1/2019', periods=8760, freq='H',
tz='America/Chicago')
self.tz_ix = pvc.get_tz_index(self.ix, self.loc)
self.assertIsInstance(self.tz_ix,
pd.core.indexes.datetimes.DatetimeIndex,
'Returned object is not a pandas DatetimeIndex.')
# If passing an index with a timezone use that timezone rather than
# the timezone in the location dictionary if there is one.
self.assertEqual(self.tz_ix.tz,
self.ix.tz,
'Returned index does not have same timezone as\
the passed index.')
def test_get_tz_index_ix_tz_warn(self):
"""Test that get_tz_index function warns when DatetimeIndex timezone
does not match the location dic timezone.
"""
self.ix = pd.date_range(start='1/1/2019', periods=8760, freq='H',
tz='America/New_York')
with self.assertWarns(UserWarning):
self.tz_ix = pvc.get_tz_index(self.ix, self.loc)
def test_get_tz_index_ix(self):
"""Test that get_tz_index function returns a datetime index\
with a timezone when passed a datetime index without a timezone."""
self.ix = pd.date_range(start='1/1/2019', periods=8760, freq='H',
tz='America/Chicago')
# remove timezone info but keep missing hour and extra hour due to DST
self.ix = self.ix.tz_localize(None)
self.tz_ix = pvc.get_tz_index(self.ix, self.loc)
self.assertIsInstance(self.tz_ix,
pd.core.indexes.datetimes.DatetimeIndex,
'Returned object is not a pandas DatetimeIndex.')
# If passing an index without a timezone use returned index should have
# the timezone of the passed location dictionary.
self.assertEqual(self.tz_ix.tz,
pytz.timezone(self.loc['tz']),
'Returned index does not have same timezone as\
the passed location dictionary.')
def test_csky_concat(self):
# concat=True by default
csky_ghi_poa = pvc.csky(self.df, loc=self.loc, sys=self.sys)
self.assertIsInstance(csky_ghi_poa, pd.core.frame.DataFrame,
'Did not return a pandas dataframe.')
self.assertEqual(csky_ghi_poa.shape[1],
self.df.shape[1] + 2,
'Returned dataframe does not have 2 new columns.')
self.assertIn('ghi_mod_csky', csky_ghi_poa.columns,
'Modeled clear sky ghi not in returned dataframe columns')
self.assertIn('poa_mod_csky', csky_ghi_poa.columns,
'Modeled clear sky poa not in returned dataframe columns')
# assumes typical orientation is used to calculate the poa irradiance
self.assertGreater(csky_ghi_poa.loc['10/9/1990 12:30',
'poa_mod_csky'],
csky_ghi_poa.loc['10/9/1990 12:30',
'ghi_mod_csky'],
'POA is not greater than GHI at 12:30.')
self.assertEqual(csky_ghi_poa.index.tz,
self.df.index.tz,
'Returned dataframe index timezone is not the same as\
passed dataframe.')
def test_csky_not_concat(self):
csky_ghi_poa = pvc.csky(self.df, loc=self.loc, sys=self.sys,
concat=False)
self.assertIsInstance(csky_ghi_poa, pd.core.frame.DataFrame,
'Did not return a pandas dataframe.')
self.assertEqual(csky_ghi_poa.shape[1], 2,
'Returned dataframe does not have 2 columns.')
self.assertIn('ghi_mod_csky', csky_ghi_poa.columns,
'Modeled clear sky ghi not in returned dataframe columns')
self.assertIn('poa_mod_csky', csky_ghi_poa.columns,
'Modeled clear sky poa not in returned dataframe columns')
# assumes typical orientation is used to calculate the poa irradiance
self.assertGreater(csky_ghi_poa.loc['10/9/1990 12:30',
'poa_mod_csky'],
csky_ghi_poa.loc['10/9/1990 12:30',
'ghi_mod_csky'],
'POA is not greater than GHI at 12:30.')
self.assertEqual(csky_ghi_poa.index.tz,
self.df.index.tz,
'Returned dataframe index timezone is not the same as\
passed dataframe.')
def test_csky_not_concat_poa_all(self):
csky_ghi_poa = pvc.csky(self.df, loc=self.loc, sys=self.sys,
concat=False, output='poa_all')
self.assertIsInstance(csky_ghi_poa, pd.core.frame.DataFrame,
'Did not return a pandas dataframe.')
self.assertEqual(csky_ghi_poa.shape[1], 5,
'Returned dataframe does not have 5 columns.')
cols = ['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
for col in cols:
self.assertIn(col, csky_ghi_poa.columns,
'{} not in the columns of returned\
dataframe'.format(col))
# assumes typical orientation is used to calculate the poa irradiance
self.assertEqual(csky_ghi_poa.index.tz,
self.df.index.tz,
'Returned dataframe index timezone is not the same as\
passed dataframe.')
def test_csky_not_concat_ghi_all(self):
csky_ghi_poa = pvc.csky(self.df, loc=self.loc, sys=self.sys,
concat=False, output='ghi_all')
self.assertIsInstance(csky_ghi_poa, pd.core.frame.DataFrame,
'Did not return a pandas dataframe.')
self.assertEqual(csky_ghi_poa.shape[1], 3,
'Returned dataframe does not have 3 columns.')
cols = ['ghi', 'dni', 'dhi']
for col in cols:
self.assertIn(col, csky_ghi_poa.columns,
'{} not in the columns of returned\
dataframe'.format(col))
# assumes typical orientation is used to calculate the poa irradiance
self.assertEqual(csky_ghi_poa.index.tz,
self.df.index.tz,
'Returned dataframe index timezone is not the same as\
passed dataframe.')
def test_csky_not_concat_all(self):
csky_ghi_poa = pvc.csky(self.df, loc=self.loc, sys=self.sys,
concat=False, output='all')
self.assertIsInstance(csky_ghi_poa, pd.core.frame.DataFrame,
'Did not return a pandas dataframe.')
self.assertEqual(csky_ghi_poa.shape[1], 8,
'Returned dataframe does not have 8 columns.')
cols = ['ghi', 'dni', 'dhi', 'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse']
for col in cols:
self.assertIn(col, csky_ghi_poa.columns,
'{} not in the columns of returned\
dataframe'.format(col))
# assumes typical orientation is used to calculate the poa irradiance
self.assertEqual(csky_ghi_poa.index.tz,
self.df.index.tz,
'Returned dataframe index timezone is not the same as\
passed dataframe.')
"""
Change csky to two functions for creating pvlib location and system objects.
Separate function calling location and system to calculate POA
- concat add columns to passed df or return just ghi and poa option
load_data calls final function with in place to get ghi and poa
"""
class TestGetRegCols(unittest.TestCase):
def setUp(self):
self.das = pvc.CapData('das')
self.das.load_data(path='./tests/data/',
fname='example_meas_data_aeheaders.csv',
source='AlsoEnergy')
self.das.set_regression_cols(power='-mtr-', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_not_aggregated(self):
with self.assertWarns(UserWarning):
self.das.get_reg_cols()
def test_all_coeffs(self):
self.das.agg_sensors()
cols = ['power', 'poa', 't_amb', 'w_vel']
df = self.das.get_reg_cols()
self.assertEqual(len(df.columns), 4,
'Returned number of columns is incorrect.')
self.assertEqual(df.columns.to_list(), cols,
'Columns are not renamed properly.')
self.assertEqual(self.das.data['-mtr-sum-agg'].iloc[100],
df['power'].iloc[100],
'Data in column labeled power is not power.')
self.assertEqual(self.das.data['irr-poa-mean-agg'].iloc[100],
df['poa'].iloc[100],
'Data in column labeled poa is not poa.')
self.assertEqual(self.das.data['temp-amb-mean-agg'].iloc[100],
df['t_amb'].iloc[100],
'Data in column labeled t_amb is not t_amb.')
self.assertEqual(self.das.data['wind--mean-agg'].iloc[100],
df['w_vel'].iloc[100],
'Data in column labeled w_vel is not w_vel.')
def test_poa_power(self):
self.das.agg_sensors()
cols = ['poa', 'power']
df = self.das.get_reg_cols(reg_vars=cols)
self.assertEqual(len(df.columns), 2,
'Returned number of columns is incorrect.')
self.assertEqual(df.columns.to_list(), cols,
'Columns are not renamed properly.')
self.assertEqual(self.das.data['-mtr-sum-agg'].iloc[100],
df['power'].iloc[100],
'Data in column labeled power is not power.')
self.assertEqual(self.das.data['irr-poa-mean-agg'].iloc[100],
df['poa'].iloc[100],
'Data in column labeled poa is not poa.')
def test_agg_sensors_mix(self):
"""
Test when agg_sensors resets regression_cols values to a mix of trans keys
and column names.
"""
self.das.agg_sensors(agg_map={'-inv-': 'sum', 'irr-poa-': 'mean',
'temp-amb-': 'mean', 'wind--': 'mean'})
cols = ['poa', 'power']
df = self.das.get_reg_cols(reg_vars=cols)
mtr_col = self.das.column_groups[self.das.regression_cols['power']][0]
self.assertEqual(len(df.columns), 2,
'Returned number of columns is incorrect.')
self.assertEqual(df.columns.to_list(), cols,
'Columns are not renamed properly.')
self.assertEqual(self.das.data[mtr_col].iloc[100],
df['power'].iloc[100],
'Data in column labeled power is not power.')
self.assertEqual(self.das.data['irr-poa-mean-agg'].iloc[100],
df['poa'].iloc[100],
'Data in column labeled poa is not poa.')
class TestAggSensors(unittest.TestCase):
def setUp(self):
self.das = pvc.CapData('das')
self.das.load_data(path='./tests/data/',
fname='example_meas_data_aeheaders.csv',
source='AlsoEnergy')
self.das.set_regression_cols(power='-mtr-', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_agg_map_none(self):
self.das.agg_sensors()
self.assertEqual(self.das.data_filtered.shape[1], self.das.data.shape[1],
'df and data_filtered should have same number of rows.')
self.assertEqual(self.das.data_filtered.shape[0], self.das.data.shape[0],
'Agg method inadverdently changed number of rows.')
self.assertIn('-mtr-sum-agg', self.das.data_filtered.columns,
'Sum of power trans group not in aggregated df.')
self.assertIn('irr-poa-mean-agg', self.das.data_filtered.columns,
'Mean of poa trans group not in aggregated df.')
self.assertIn('temp-amb-mean-agg', self.das.data_filtered.columns,
'Mean of amb temp trans group not in aggregated df.')
self.assertIn('wind--mean-agg', self.das.data_filtered.columns,
'Mean of wind trans group not in aggregated df.')
def test_agg_map_none_inplace_false(self):
df_flt_copy = self.das.data_filtered.copy()
df = self.das.agg_sensors(inplace=False)
self.assertEqual(df.shape[1], self.das.data.shape[1] + 4,
'Returned df does not include 4 additional cols.')
self.assertEqual(df.shape[0], self.das.data.shape[0],
'Agg method inadverdently changed number of rows.')
self.assertIn('-mtr-sum-agg', df.columns,
'Sum of power trans group not in aggregated df.')
self.assertIn('irr-poa-mean-agg', df.columns,
'Mean of poa trans group not in aggregated df.')
self.assertIn('temp-amb-mean-agg', df.columns,
'Mean of amb temp trans group not in aggregated df.')
self.assertIn('wind--mean-agg', df.columns,
'Mean of wind trans group not in aggregated df.')
self.assertTrue(df_flt_copy.equals(self.das.data_filtered),
'Method with inplace false changed data_filtered attribute.')
def test_agg_map_none_keep_false(self):
self.das.agg_sensors(keep=False)
self.assertEqual(self.das.data_filtered.shape[1], 4,
'Returned dataframe does not have 4 columns.')
self.assertEqual(self.das.data_filtered.shape[0], self.das.data.shape[0],
'Agg method inadverdently changed number of rows.')
self.assertIn('-mtr-sum-agg', self.das.data_filtered.columns,
'Sum of power trans group not in aggregated df.')
self.assertIn('irr-poa-mean-agg', self.das.data_filtered.columns,
'Mean of poa trans group not in aggregated df.')
self.assertIn('temp-amb-mean-agg', self.das.data_filtered.columns,
'Mean of amb temp trans group not in aggregated df.')
self.assertIn('wind--mean-agg', self.das.data_filtered.columns,
'Mean of wind trans group not in aggregated df.')
def test_agg_map_non_str_func(self):
self.das.agg_sensors(agg_map={'irr-poa-': np.mean})
self.assertEqual(self.das.data_filtered.shape[1], self.das.data.shape[1],
'df and data_filtered should have same number of rows.')
self.assertEqual(self.das.data_filtered.shape[0], self.das.data.shape[0],
'Agg method inadverdently changed number of rows.')
self.assertIn('irr-poa-mean-agg', self.das.data_filtered.columns,
'Mean of poa trans group not in aggregated df.')
def test_agg_map_mix_funcs(self):
self.das.agg_sensors(agg_map={'irr-poa-': [np.mean, 'sum']})
self.assertEqual(self.das.data_filtered.shape[1], self.das.data.shape[1],
'df and data_filtered should have same number of rows.')
self.assertEqual(self.das.data_filtered.shape[0], self.das.data.shape[0],
'Agg method inadverdently changed number of rows.')
self.assertIn('irr-poa-mean-agg', self.das.data_filtered.columns,
'Mean of poa trans group not in aggregated df.')
self.assertIn('irr-poa-sum-agg', self.das.data_filtered.columns,
'Sum of poa trans group not in aggregated df.')
def test_agg_map_update_regression_cols(self):
self.das.agg_sensors()
self.assertEqual(self.das.regression_cols['power'], '-mtr-sum-agg',
'Power regression_cols not updated to agg column.')
self.assertEqual(self.das.regression_cols['poa'], 'irr-poa-mean-agg',
'POA regression_cols not updated to agg column.')
self.assertEqual(self.das.regression_cols['t_amb'], 'temp-amb-mean-agg',
'Amb temp regression_cols not updated to agg column.')
self.assertEqual(self.das.regression_cols['w_vel'], 'wind--mean-agg',
'Wind velocity regression_cols not updated to agg column.')
def test_reset_summary(self):
self.das.agg_sensors()
self.assertEqual(len(self.das.summary), 0,
'Summary data not reset.')
self.assertEqual(len(self.das.summary_ix), 0,
'Summary index not reset.')
def test_reset_agg_method(self):
orig_df = self.das.data.copy()
orig_trans = self.das.column_groups.copy()
orig_reg_trans = self.das.regression_cols.copy()
self.das.agg_sensors()
self.das.filter_irr(200, 500)
self.das.reset_agg()
self.assertTrue(self.das.data.equals(orig_df),
'df attribute does not match pre-agg df after reset.')
self.assertTrue(all(self.das.data_filtered.columns == orig_df.columns),
'Filtered dataframe does not have same columns as'
'original dataframe after resetting agg.')
self.assertLess(self.das.data_filtered.shape[0], orig_df.shape[0],
'Filtering overwritten by reset agg method.')
def test_warn_if_filters_already_run(self):
"""
Warn if method is writing over filtering already applied to data_filtered.
"""
poa_key = self.das.regression_cols['poa']
self.das.column_groups[poa_key] = [self.das.column_groups[poa_key][0]]
self.das.filter_irr(200, 800)
with self.assertWarns(UserWarning):
self.das.agg_sensors()
class TestFilterSensors(unittest.TestCase):
def setUp(self):
self.das = pvc.CapData('das')
self.das.load_data(path='./tests/data/',
fname='example_meas_data.csv',
column_type_report=False)
self.das.set_regression_cols(power='-mtr-', poa='irr-poa-ref_cell',
t_amb='temp-amb-', w_vel='wind--')
def test_perc_diff_none(self):
rows_before_flt = self.das.data_filtered.shape[0]
self.das.filter_sensors(perc_diff=None, inplace=True)
self.assertIsInstance(self.das.data_filtered, pd.core.frame.DataFrame,
'Did not dave a dataframe to data_filtered.')
self.assertLess(self.das.data_filtered.shape[0], rows_before_flt,
'No rows removed.')
def test_perc_diff(self):
rows_before_flt = self.das.data_filtered.shape[0]
self.das.filter_sensors(perc_diff={'irr-poa-ref_cell': 0.05,
'temp-amb-': 0.1},
inplace=True)
self.assertIsInstance(self.das.data_filtered, pd.core.frame.DataFrame,
'Did not dave a dataframe to data_filtered.')
self.assertLess(self.das.data_filtered.shape[0], rows_before_flt,
'No rows removed.')
def test_after_agg_sensors(self):
rows_before_flt = self.das.data_filtered.shape[0]
self.das.agg_sensors(agg_map={'-inv-': 'sum',
'irr-poa-ref_cell': 'mean',
'wind--': 'mean',
'temp-amb-': 'mean'})
self.das.filter_sensors(perc_diff={'irr-poa-ref_cell': 0.05,
'temp-amb-': 0.1},
inplace=True)
self.assertIsInstance(self.das.data_filtered, pd.core.frame.DataFrame,
'Did not dave a dataframe to data_filtered.')
self.assertLess(self.das.data_filtered.shape[0], rows_before_flt,
'No rows removed.')
self.assertIn('-inv-sum-agg', self.das.data_filtered.columns,
'filter_sensors did not retain aggregation columns.')
class TestRepCondNoFreq(unittest.TestCase):
def setUp(self):
self.meas = pvc.CapData('meas')
self.meas.load_data(path='./tests/data/', fname='nrel_data.csv',
source='AlsoEnergy')
self.meas.set_regression_cols(power='', poa='irr-poa-',
t_amb='temp--', w_vel='wind--')
def test_defaults(self):
self.meas.rep_cond()
self.assertIsInstance(self.meas.rc, pd.core.frame.DataFrame,
'No dataframe stored in the rc attribute.')
def test_defaults_wvel(self):
self.meas.rep_cond(w_vel=50)
self.assertEqual(self.meas.rc['w_vel'][0], 50,
'Wind velocity not overwritten by user value')
def test_defaults_not_inplace(self):
df = self.meas.rep_cond(inplace=False)
self.assertIsNone(self.meas.rc,
'Method result stored instead of returned.')
self.assertIsInstance(df, pd.core.frame.DataFrame,
'No dataframe returned from method.')
def test_irr_bal_inplace(self):
self.meas.filter_irr(0.1, 2000)
meas2 = self.meas.copy()
meas2.rep_cond()
self.meas.rep_cond(irr_bal=True, percent_filter=20)
self.assertIsInstance(self.meas.rc, pd.core.frame.DataFrame,
'No dataframe stored in the rc attribute.')
self.assertNotEqual(self.meas.rc['poa'][0], meas2.rc['poa'][0],
'Irr_bal function returned same result\
as w/o irr_bal')
def test_irr_bal_inplace_wvel(self):
self.meas.rep_cond(irr_bal=True, percent_filter=20, w_vel=50)
self.assertEqual(self.meas.rc['w_vel'][0], 50,
'Wind velocity not overwritten by user value')
def test_irr_bal_inplace_no_percent_filter(self):
with self.assertWarns(UserWarning):
self.meas.rep_cond(irr_bal=True, percent_filter=None)
class TestRepCondFreq(unittest.TestCase):
def setUp(self):
self.pvsyst = pvc.CapData('pvsyst')
self.pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
self.pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_monthly_no_irr_bal(self):
self.pvsyst.rep_cond(freq='M')
self.assertIsInstance(self.pvsyst.rc, pd.core.frame.DataFrame,
'No dataframe stored in the rc attribute.')
self.assertEqual(self.pvsyst.rc.shape[0], 12,
'Rep conditions dataframe does not have 12 rows.')
def test_monthly_irr_bal(self):
self.pvsyst.rep_cond(freq='M', irr_bal=True, percent_filter=20)
self.assertIsInstance(self.pvsyst.rc, pd.core.frame.DataFrame,
'No dataframe stored in the rc attribute.')
self.assertEqual(self.pvsyst.rc.shape[0], 12,
'Rep conditions dataframe does not have 12 rows.')
def test_seas_no_irr_bal(self):
self.pvsyst.rep_cond(freq='BQ-NOV', irr_bal=False)
self.assertIsInstance(self.pvsyst.rc, pd.core.frame.DataFrame,
'No dataframe stored in the rc attribute.')
self.assertEqual(self.pvsyst.rc.shape[0], 4,
'Rep conditions dataframe does not have 4 rows.')
class TestPredictCapacities(unittest.TestCase):
def setUp(self):
self.pvsyst = pvc.CapData('pvsyst')
self.pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
self.pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
self.pvsyst.filter_irr(200, 800)
self.pvsyst.tolerance = '+/- 5'
def test_monthly(self):
self.pvsyst.rep_cond(freq='MS')
pred_caps = self.pvsyst.predict_capacities(irr_filter=True, percent_filter=20)
july_grpby = pred_caps.loc['1990-07-01', 'PredCap']
self.assertIsInstance(pred_caps, pd.core.frame.DataFrame,
'Returned object is not a Dataframe.')
self.assertEqual(pred_caps.shape[0], 12,
'Predicted capacities does not have 12 rows.')
self.pvsyst.data_filtered = self.pvsyst.data_filtered.loc['7/1/90':'7/31/90', :]
self.pvsyst.rep_cond()
self.pvsyst.filter_irr(0.8, 1.2, ref_val=self.pvsyst.rc['poa'][0])
df = self.pvsyst.rview(['power', 'poa', 't_amb', 'w_vel'],
filtered_data=True)
rename = {df.columns[0]: 'power',
df.columns[1]: 'poa',
df.columns[2]: 't_amb',
df.columns[3]: 'w_vel'}
df = df.rename(columns=rename)
reg = pvc.fit_model(df)
july_manual = reg.predict(self.pvsyst.rc)[0]
self.assertEqual(july_manual, july_grpby,
'Manual prediction for July {} is not equal'
'to the predict_capacites groupby'
'prediction {}'.format(july_manual, july_grpby))
def test_no_irr_filter(self):
self.pvsyst.rep_cond(freq='M')
pred_caps = self.pvsyst.predict_capacities(irr_filter=False)
self.assertIsInstance(pred_caps, pd.core.frame.DataFrame,
'Returned object is not a Dataframe.')
self.assertEqual(pred_caps.shape[0], 12,
'Predicted capacities does not have 12 rows.')
def test_rc_from_irrBal(self):
self.pvsyst.rep_cond(freq='M', irr_bal=True, percent_filter=20)
pred_caps = self.pvsyst.predict_capacities(irr_filter=False)
self.assertIsInstance(pred_caps, pd.core.frame.DataFrame,
'Returned object is {} not a\
Dataframe.'.format(type(pred_caps)))
self.assertEqual(pred_caps.shape[0], 12,
'Predicted capacities does not have 12 rows.')
def test_seasonal_freq(self):
self.pvsyst.rep_cond(freq='BQ-NOV')
pred_caps = self.pvsyst.predict_capacities(irr_filter=True, percent_filter=20)
self.assertIsInstance(pred_caps, pd.core.frame.DataFrame,
'Returned object is {} not a\
Dataframe.'.format(type(pred_caps)))
self.assertEqual(pred_caps.shape[0], 4,
'Predicted capacities has {} rows instead of 4\
rows.'.format(pred_caps.shape[0]))
class TestFilterIrr(unittest.TestCase):
def setUp(self):
self.meas = pvc.CapData('meas')
self.meas.load_data('./tests/data/', 'nrel_data.csv',
source='AlsoEnergy')
self.meas.set_regression_cols(power='', poa='irr-poa-',
t_amb='temp--', w_vel='wind--')
def test_get_poa_col(self):
col = self.meas._CapData__get_poa_col()
self.assertEqual(col, 'POA 40-South CMP11 [W/m^2]',
'POA column not returned')
def test_get_poa_col_multcols(self):
self.meas.data['POA second column'] = self.meas.rview('poa').values
self.meas.group_columns()
with self.assertWarns(UserWarning):
col = self.meas._CapData__get_poa_col()
def test_lowhigh_nocol(self):
pts_before = self.meas.data_filtered.shape[0]
self.meas.filter_irr(500, 600, ref_val=None, col_name=None,
inplace=True)
self.assertLess(self.meas.data_filtered.shape[0], pts_before,
'Filter did not remove points.')
def test_lowhigh_colname(self):
pts_before = self.meas.data_filtered.shape[0]
self.meas.data['POA second column'] = self.meas.rview('poa').values
self.meas.group_columns()
self.meas.data_filtered = self.meas.data.copy()
self.meas.filter_irr(500, 600, ref_val=None,
col_name='POA second column', inplace=True)
self.assertLess(self.meas.data_filtered.shape[0], pts_before,
'Filter did not remove points.')
def test_refval_nocol(self):
pts_before = self.meas.data_filtered.shape[0]
self.meas.filter_irr(0.8, 1.2, ref_val=500, col_name=None,
inplace=True)
self.assertLess(self.meas.data_filtered.shape[0], pts_before,
'Filter did not remove points.')
def test_refval_withcol(self):
pts_before = self.meas.data_filtered.shape[0]
self.meas.data['POA second column'] = self.meas.rview('poa').values
self.meas.group_columns()
self.meas.data_filtered = self.meas.data.copy()
self.meas.filter_irr(0.8, 1.2, ref_val=500,
col_name='POA second column', inplace=True)
self.assertLess(self.meas.data_filtered.shape[0], pts_before,
'Filter did not remove points.')
def test_refval_withcol_notinplace(self):
pts_before = self.meas.data_filtered.shape[0]
df = self.meas.filter_irr(500, 600, ref_val=None, col_name=None,
inplace=False)
self.assertEqual(self.meas.data_filtered.shape[0], pts_before,
'Filter removed points from data_filtered.')
self.assertIsInstance(df, pd.core.frame.DataFrame,
'Did not return DataFrame object.')
self.assertLess(df.shape[0], pts_before,
'Filter did not remove points from returned DataFrame.')
class TestGetSummary(unittest.TestCase):
def setUp(self):
self.meas = pvc.CapData('meas')
self.meas.load_data('./tests/data/', 'nrel_data.csv',
source='AlsoEnergy')
self.meas.set_regression_cols(power='', poa='irr-poa-',
t_amb='temp--', w_vel='wind--')
def test_col_names(self):
self.meas.filter_irr(200, 500)
smry = self.meas.get_summary()
self.assertEqual(smry.columns[0], 'pts_after_filter',
'First column of summary data is not labeled '
'pts_after_filter.')
self.assertEqual(smry.columns[1], 'pts_removed',
'First column of summary data is not labeled '
'pts_removed.')
self.assertEqual(smry.columns[2], 'filter_arguments',
'First column of summary data is not labeled '
'filter_arguments.')
class TestFilterTime(unittest.TestCase):
def setUp(self):
self.pvsyst = pvc.CapData('pvsyst')
self.pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
self.pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_start_end(self):
self.pvsyst.filter_time(start='2/1/90', end='2/15/90')
self.assertEqual(self.pvsyst.data_filtered.index[0],
pd.Timestamp(year=1990, month=2, day=1, hour=0),
'First timestamp should be 2/1/1990')
self.assertEqual(self.pvsyst.data_filtered.index[-1],
pd.Timestamp(year=1990, month=2, day=15, hour=00),
'Last timestamp should be 2/15/1990 00:00')
def test_start_days(self):
self.pvsyst.filter_time(start='2/1/90', days=15)
self.assertEqual(self.pvsyst.data_filtered.index[0],
pd.Timestamp(year=1990, month=2, day=1, hour=0),
'First timestamp should be 2/1/1990')
self.assertEqual(self.pvsyst.data_filtered.index[-1],
pd.Timestamp(year=1990, month=2, day=16, hour=00),
'Last timestamp should be 2/15/1990 00:00')
def test_end_days(self):
self.pvsyst.filter_time(end='2/16/90', days=15)
self.assertEqual(self.pvsyst.data_filtered.index[0],
pd.Timestamp(year=1990, month=2, day=1, hour=0),
'First timestamp should be 2/1/1990')
self.assertEqual(self.pvsyst.data_filtered.index[-1],
pd.Timestamp(year=1990, month=2, day=16, hour=00),
'Last timestamp should be 2/15/1990 00:00')
def test_test_date(self):
self.pvsyst.filter_time(test_date='2/16/90', days=30)
self.assertEqual(self.pvsyst.data_filtered.index[0],
pd.Timestamp(year=1990, month=2, day=1, hour=0),
'First timestamp should be 2/1/1990')
self.assertEqual(self.pvsyst.data_filtered.index[-1],
pd.Timestamp(year=1990, month=3, day=3, hour=00),
'Last timestamp should be 3/2/1990 00:00')
def test_start_end_not_inplace(self):
df = self.pvsyst.filter_time(start='2/1/90', end='2/15/90',
inplace=False)
self.assertEqual(df.index[0],
pd.Timestamp(year=1990, month=2, day=1, hour=0),
'First timestamp should be 2/1/1990')
self.assertEqual(df.index[-1],
pd.Timestamp(year=1990, month=2, day=15, hour=00),
'Last timestamp should be 2/15/1990 00:00')
def test_start_no_days(self):
with self.assertWarns(UserWarning):
self.pvsyst.filter_time(start='2/1/90')
def test_end_no_days(self):
with self.assertWarns(UserWarning):
self.pvsyst.filter_time(end='2/1/90')
def test_test_date_no_days(self):
with self.assertWarns(UserWarning):
self.pvsyst.filter_time(test_date='2/1/90')
class TestFilterDays(unittest.TestCase):
def setUp(self):
self.pvsyst = pvc.CapData('pvsyst')
self.pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
self.pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_keep_one_day(self):
self.pvsyst.filter_days(['10/5/1990'], drop=False, inplace=True)
self.assertEqual(self.pvsyst.data_filtered.shape[0], 24)
self.assertEqual(self.pvsyst.data_filtered.index[0].day, 5)
def test_keep_two_contiguous_days(self):
self.pvsyst.filter_days(['10/5/1990', '10/6/1990'], drop=False,
inplace=True)
self.assertEqual(self.pvsyst.data_filtered.shape[0], 48)
self.assertEqual(self.pvsyst.data_filtered.index[-1].day, 6)
def test_keep_three_noncontiguous_days(self):
self.pvsyst.filter_days(['10/5/1990', '10/7/1990', '10/9/1990'],
drop=False, inplace=True)
self.assertEqual(self.pvsyst.data_filtered.shape[0], 72)
self.assertEqual(self.pvsyst.data_filtered.index[0].day, 5)
self.assertEqual(self.pvsyst.data_filtered.index[25].day, 7)
self.assertEqual(self.pvsyst.data_filtered.index[49].day, 9)
def test_drop_one_day(self):
self.pvsyst.filter_days(['1/1/1990'], drop=True, inplace=True)
self.assertEqual(self.pvsyst.data_filtered.shape[0], (8760 - 24))
self.assertEqual(self.pvsyst.data_filtered.index[0].day, 2)
self.assertEqual(self.pvsyst.data_filtered.index[0].hour, 0)
def test_drop_three_days(self):
self.pvsyst.filter_days(['1/1/1990', '1/3/1990', '1/5/1990'],
drop=True, inplace=True)
self.assertEqual(self.pvsyst.data_filtered.shape[0], (8760 - 24 * 3))
self.assertEqual(self.pvsyst.data_filtered.index[0].day, 2)
self.assertEqual(self.pvsyst.data_filtered.index[25].day, 4)
self.assertEqual(self.pvsyst.data_filtered.index[49].day, 6)
def test_not_inplace(self):
df = self.pvsyst.filter_days(['10/5/1990'], drop=False, inplace=False)
self.assertEqual(self.pvsyst.data_filtered.shape[0], 8760)
self.assertEqual(df.shape[0], 24)
class TestFilterPF(unittest.TestCase):
def setUp(self):
self.meas = pvc.CapData('meas')
self.meas.load_data(path='./tests/data/', fname='nrel_data.csv',
source='AlsoEnergy')
self.meas.set_regression_cols(power='', poa='irr-poa-',
t_amb='temp--', w_vel='wind--')
def test_pf(self):
pf = np.ones(5)
pf = np.append(pf, np.ones(5) * -1)
pf = np.append(pf, np.arange(0, 1, 0.1))
self.meas.data['pf'] = np.tile(pf, 576)
self.meas.data_filtered = self.meas.data.copy()
self.meas.group_columns()
self.meas.filter_pf(1)
self.assertEqual(self.meas.data_filtered.shape[0], 5760,
'Incorrect number of points removed.')
class TestFilterOutliersAndPower(unittest.TestCase):
def setUp(self):
self.das = pvc.CapData('das')
self.das.load_data(path='./tests/data/',
fname='example_meas_data_aeheaders.csv',
source='AlsoEnergy')
self.das.set_regression_cols(power='-mtr-', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
def test_not_aggregated(self):
with self.assertWarns(UserWarning):
self.das.filter_outliers()
def test_filter_power_defaults(self):
self.das.filter_power(5_000_000, percent=None, columns=None,
inplace=True)
self.assertEqual(self.das.data_filtered.shape[0], 1289)
def test_filter_power_percent(self):
self.das.filter_power(6_000_000, percent=0.05, columns=None,
inplace=True)
self.assertEqual(self.das.data_filtered.shape[0], 1388)
def test_filter_power_a_column(self):
self.das.filter_power(5_000_000, percent=None,
columns='Elkor Production Meter KW, kW',
inplace=True)
self.assertEqual(self.das.data_filtered.shape[0], 1289)
def test_filter_power_column_group(self):
self.das.filter_power(500_000, percent=None, columns='-inv-',
inplace=True)
self.assertEqual(self.das.data_filtered.shape[0], 1138)
def test_filter_power_columns_not_str(self):
with self.assertWarns(UserWarning):
self.das.filter_power(500_000, percent=None, columns=1,
inplace=True)
class Test_Csky_Filter(unittest.TestCase):
"""
Tests for filter_clearsky method.
"""
def setUp(self):
self.meas = pvc.CapData('meas')
loc = {'latitude': 39.742, 'longitude': -105.18,
'altitude': 1828.8, 'tz': 'Etc/GMT+7'}
sys = {'surface_tilt': 40, 'surface_azimuth': 180,
'albedo': 0.2}
self.meas.load_data(path='./tests/data/', fname='nrel_data.csv',
source='AlsoEnergy', clear_sky=True, loc=loc, sys=sys)
def test_default(self):
self.meas.filter_clearsky()
self.assertLess(self.meas.data_filtered.shape[0],
self.meas.data.shape[0],
'Filtered dataframe should have less rows.')
self.assertEqual(self.meas.data_filtered.shape[1],
self.meas.data.shape[1],
'Filtered dataframe should have equal number of cols.')
for i, col in enumerate(self.meas.data_filtered.columns):
self.assertEqual(col, self.meas.data.columns[i],
'Filter changed column {} to '
'{}'.format(self.meas.data.columns[i], col))
def test_two_ghi_cols(self):
self.meas.data['ws 2 ghi W/m^2'] = self.meas.view('irr-ghi-') * 1.05
self.meas.data_filtered = self.meas.data.copy()
self.meas.group_columns()
with self.assertWarns(UserWarning):
self.meas.filter_clearsky()
def test_mult_ghi_categories(self):
cn = 'irrad ghi pyranometer W/m^2'
self.meas.data[cn] = self.meas.view('irr-ghi-') * 1.05
self.meas.group_columns()
with self.assertWarns(UserWarning):
self.meas.filter_clearsky()
def test_no_clear_ghi(self):
self.meas.drop_cols('ghi_mod_csky')
with self.assertWarns(UserWarning):
self.meas.filter_clearsky()
def test_specify_ghi_col(self):
self.meas.data['ws 2 ghi W/m^2'] = self.meas.view('irr-ghi-') * 1.05
self.meas.group_columns()
self.meas.data_filtered = self.meas.data.copy()
self.meas.filter_clearsky(ghi_col='ws 2 ghi W/m^2')
self.assertLess(self.meas.data_filtered.shape[0],
self.meas.data.shape[0],
'Filtered dataframe should have less rows.')
self.assertEqual(self.meas.data_filtered.shape[1],
self.meas.data.shape[1],
'Filtered dataframe should have equal number of cols.')
for i, col in enumerate(self.meas.data_filtered.columns):
self.assertEqual(col, self.meas.data.columns[i],
'Filter changed column {} to '
'{}'.format(self.meas.data.columns[i], col))
def test_no_clear_sky(self):
with self.assertWarns(UserWarning):
self.meas.filter_clearsky(window_length=2)
class TestCapTestCpResultsSingleCoeff(unittest.TestCase):
"""Tests for the capactiy test results method using a regression formula
with a single coefficient."""
def setUp(self):
np.random.seed(9876789)
self.meas = pvc.CapData('meas')
self.sim = pvc.CapData('sim')
# self.cptest = pvc.CapTest(meas, sim, '+/- 5')
self.meas.rc = {'x': [6]}
nsample = 100
e = np.random.normal(size=nsample)
x = np.linspace(0, 10, 100)
das_y = x * 2
sim_y = x * 2 + 1
das_y = das_y + e
sim_y = sim_y + e
das_df = pd.DataFrame({'y': das_y, 'x': x})
sim_df = pd.DataFrame({'y': sim_y, 'x': x})
das_model = smf.ols(formula='y ~ x - 1', data=das_df)
sim_model = smf.ols(formula='y ~ x - 1', data=sim_df)
self.meas.regression_results = das_model.fit()
self.sim.regression_results = sim_model.fit()
self.meas.data_filtered = pd.DataFrame()
self.sim.data_filtered = pd.DataFrame()
def test_return(self):
res = pvc.captest_results(self.sim, self.meas, 100, '+/- 5',
print_res=False)
self.assertIsInstance(res,
float,
'Returned value is not a tuple')
class TestCapTestCpResultsMultCoeffKwVsW(unittest.TestCase):
"""
Setup and test to check automatic adjustment for kW vs W.
"""
def test_pvals_default_false_kw_vs_w(self):
np.random.seed(9876789)
meas = pvc.CapData('meas')
sim = pvc.CapData('sim')
# cptest = pvc.CapTest(meas, sim, '+/- 5')
meas.rc = pd.DataFrame({'poa': [6], 't_amb': [5], 'w_vel': [3]})
nsample = 100
e = np.random.normal(size=nsample)
a = np.linspace(0, 10, 100)
b = np.linspace(0, 10, 100) / 2.0
c = np.linspace(0, 10, 100) + 3.0
das_y = a + (a ** 2) + (a * b) + (a * c)
sim_y = a + (a ** 2 * 0.9) + (a * b * 1.1) + (a * c * 0.8)
das_y = das_y + e
sim_y = sim_y + e
das_df = pd.DataFrame({'power': das_y, 'poa': a,
't_amb': b, 'w_vel': c})
sim_df = pd.DataFrame({'power': sim_y, 'poa': a,
't_amb': b, 'w_vel': c})
meas.data = das_df
meas.data['power'] /= 1000
meas.set_regression_cols(power='power', poa='poa',
t_amb='t_amb', w_vel='w_vel')
fml = 'power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'
das_model = smf.ols(formula=fml, data=das_df)
sim_model = smf.ols(formula=fml, data=sim_df)
meas.regression_results = das_model.fit()
sim.regression_results = sim_model.fit()
meas.data_filtered = pd.DataFrame()
sim.data_filtered = pd.DataFrame()
actual = meas.regression_results.predict(meas.rc)[0] * 1000
expected = sim.regression_results.predict(meas.rc)[0]
cp_rat_test_val = actual / expected
with self.assertWarns(UserWarning):
cp_rat = pvc.captest_results(sim, meas, 100, '+/- 5',
check_pvalues=False, print_res=False)
self.assertAlmostEqual(cp_rat, cp_rat_test_val, 6,
'captest_results did not return expected value.')
class TestCapTestCpResultsMultCoeff(unittest.TestCase):
"""
Test captest_results function using a regression formula with multiple coef.
"""
def setUp(self):
np.random.seed(9876789)
self.meas = pvc.CapData('meas')
self.sim = pvc.CapData('sim')
# self.cptest = pvc.CapTest(meas, sim, '+/- 5')
self.meas.rc = | pd.DataFrame({'poa': [6], 't_amb': [5], 'w_vel': [3]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import calendar
#Moving average
def MA(data=[], timeperiod=10):
ma = []
ma_a = pd.DataFrame(data,columns=['MA']).rolling(window=timeperiod).mean()
for i in ma_a['MA']:
ma.append(i)
return ma
#MACD related indicators
#Moving average: there will be unstable period in the beginning
#input: list of close price
def EMA(close=[], timeperiod=10):
ema = []
current = close[0]
for i in close:
current = (current*(timeperiod-1)+ 2*i)/(timeperiod+1)
ema.append(current)
return ema
def DIF(close=[], fastperiod=12, slowperiod=26):
dif = []
s_ema = EMA(close, slowperiod)
f_ema = EMA(close, fastperiod)
for i in range(len(close)):
dif.append(f_ema[i]-s_ema[i])
return dif
def DEA(close=[], fastperiod=12, slowperiod=26, signalperiod=9):
dif = DIF(close,fastperiod,slowperiod)
return EMA(dif, signalperiod)
def MACD(close=[], fastperiod=12, slowperiod=26, signalperiod=9):
macd = []
dif = DIF(close,fastperiod,slowperiod)
dea = EMA(dif, signalperiod)
for i in range(len(close)):
macd.append(2*(dif[i]-dea[i]))
return macd
# 夏普比率: 平均收益率/收益率标准差
#Sharpe Ratio: Sharpe ratio = Excess return / Standard deviation
#input:
# erp: Portfolio expected return rate
# within fixed timeperiod (e.g.yearly/monthly)
# rf: risk-free/expect rate of interest
def sharpe(erp=[], rf=0):
a = np.array(erp)
return (np.mean(a)-rf)/np.std(a,ddof=1)
#最大回撤率
#Max draw down ratio
#input:
# accumulated: accumulated money history
# period: To be added....
# >0 means short-term MADD within input period -> worth list
def MDD(accumulated=[],period=0):
current_mdd = mdd = 0
for i in range(len(accumulated)):
if period>0 and i>period:
j = i-period
else:
j = 0
if i > 0:
top = max(accumulated[int(j):int(i)])
current_mdd = (top - accumulated[i])/top
if mdd < current_mdd:
mdd = current_mdd
return mdd
#To be added:
#DMI related indicators
#KDJ
#RSI
#BIAS
#日净值历史
def daily_accumulated(principal,trade_history):
'''
Daily accumulated money
input:
principal: initial invest money
trade_history: stock buy-in and sell-out history. format should be
"index(title is none)","stock","buy_date","sell_date","holddays","profit","buy_money"
date format: %Y-%m-%d
output:
dict:{date:total_money} !!no order for dict
Daily accumulated history during whole trade
'''
df = trade_history[["stock","buy_date","sell_date","holddays","profit","buy_money"]]
start_date = min(dt.datetime.strptime(i, "%Y-%m-%d") for i in df["buy_date"].values)
end_date = max(dt.datetime.strptime(i, "%Y-%m-%d") for i in df["sell_date"].values)
datelist = [i.strftime('%Y-%m-%d') for i in pd.date_range(start_date, end_date)]
sell_history = {d:[] for d in datelist}
current_money = {d:0 for d in datelist}
for i in df.values:
selldate = i[2]
sell_history[dt.datetime.strptime(selldate,"%Y-%m-%d").strftime("%Y-%m-%d")].append(i)
current = principal
for date in datelist:
if len(sell_history[date]) > 0:
for sell_stock in sell_history[date]:
current = current + sell_stock[4]*sell_stock[5]
current_money[date] = current
'''
newdf = pd.DataFrame(data=[current_money[i] for i in datelist], \
index=datelist,columns=["totalmoney"])
newdf["date"] = newdf.index
newdf.plot(x="date", y="totalmoney", kind='area')
plt.savefig("positiongain_from_{}_to_{}.png".format(start_date.strftime("%Y_%m_%d"),end_date.strftime("%Y_%m_%d")))
plt.show()
'''
return current_money
#月净值历史, 月度年化收益率
#结算日:月末最后一天
def monthly_accumulated(principal,trade_history):
'''
Daily accumulated money
input:
principal: initial invest money
trade_history: stock buy-in and sell-out history. format should be
"index(title is none)","stock","buy_date","sell_date","holddays","profit","buy_money"
date format: %Y-%m-%d
output:
dict:{date:[total_money,growth_ratio,annual_yield]} !!no order for dict
Monthly accumulated history during whole trade
'''
daily = daily_accumulated(principal,trade_history)
date_list = sorted(daily.keys())
start_date = date_list[0]
end_date = date_list[-1]
enddatelist = [i.strftime('%Y-%m-%d') \
for i in pd.date_range(start_date, end_date, freq='M')]
monthlist = [i.strftime('%Y-%m') \
for i in | pd.date_range(start_date, end_date, freq='M') | pandas.date_range |
import os
import sys
import uuid
import math
import pickle
import pathlib
import getpass
from platform import uname
import pandas as pd
import numpy as np
import datetime as dt
from datetime import datetime
from collections import OrderedDict
from scipy.integrate import cumtrapz
from functools import reduce
import catboost
from catboost import CatBoostRegressor
from sklearn.svm import SVR
from sklearn.impute import KNNImputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.feature_selection import SelectKBest, chi2, SelectFromModel
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error, accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor, ExtraTreesRegressor, BaggingClassifier
from xgboost import XGBRegressor
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings(action='ignore',category=DeprecationWarning)
warnings.filterwarnings(action='ignore',category=FutureWarning)
class Config(object):
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.WARNING)
FILE_HANDLER = logging.FileHandler('logfile.log')
FORMATTER = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(file_handler)
DATA = dict(
# BASE_DIR = pathlib.Path().resolve(),
DATASET_DIR = pathlib.Path().resolve() / "data/dengue",
EXPORT_DIR = pathlib.Path().resolve() / "data/dengue/exports",
)
ANALYSIS_CONFIG = dict(
OUTLIERS_COLS = ["precipitation_amt_mm", "reanalysis_precip_amt_kg_per_m2", "reanalysis_sat_precip_amt_mm", "station_precip_mm"]
)
MODELLING_CONFIG = dict(
TRAIN_COLS = ['year', 'weekofyear', 'ndvi_ne', 'ndvi_nw', 'ndvi_se',
'ndvi_sw', 'precipitation_amt_mm', 'reanalysis_air_temp_k',
'reanalysis_avg_temp_k', 'reanalysis_dew_point_temp_k',
'reanalysis_max_air_temp_k', 'reanalysis_min_air_temp_k',
'reanalysis_precip_amt_kg_per_m2',
'reanalysis_relative_humidity_percent', 'reanalysis_sat_precip_amt_mm',
'reanalysis_specific_humidity_g_per_kg', 'reanalysis_tdtr_k',
'station_avg_temp_c', 'station_diur_temp_rng_c', 'station_max_temp_c',
'station_min_temp_c', 'station_precip_mm'],
FEATURE_ENGINEER_COLS = ['low_season', 'rampup_season', 'high_season',
'reanalysis_specific_humidity_g_per_kg_1lag',
'reanalysis_specific_humidity_g_per_kg_2lag',
'reanalysis_specific_humidity_g_per_kg_3lag',
'reanalysis_dew_point_temp_k_1lag', 'reanalysis_dew_point_temp_k_2lag',
'reanalysis_dew_point_temp_k_3lag', 'reanalysis_min_air_temp_k_1lag',
'reanalysis_min_air_temp_k_2lag', 'reanalysis_min_air_temp_k_3lag',
'reanalysis_max_air_temp_k_1lag', 'reanalysis_max_air_temp_k_2lag',
'reanalysis_max_air_temp_k_3lag', 'station_min_temp_c_1lag',
'station_min_temp_c_2lag', 'station_min_temp_c_3lag',
'station_max_temp_c_1lag', 'station_max_temp_c_2lag',
'station_max_temp_c_3lag', 'reanalysis_air_temp_k_1lag',
'reanalysis_air_temp_k_2lag', 'reanalysis_air_temp_k_3lag',
'reanalysis_relative_humidity_percent_1lag',
'reanalysis_relative_humidity_percent_2lag',
'reanalysis_relative_humidity_percent_3lag'],
TUNING_METHOD = "random_search",
FEATURE_SELECTION_COLUMNS = ["RF", "Extratrees", "Kbest"],
)
class Analysis(Config):
data = {}
def __init__(self, city=["*"]):
self.city = city
def get_data(self):
logging.info("----------------------------------------------------------- PREPROCESSING ------------------------------------------------------------")
logging.info("Reading TRAIN Dataset:")
self.data["train_df"] = pd.read_csv(self.DATA["DATASET_DIR"] / 'merged_train.csv', index_col=0)
logging.info("Initiate Preprocessing of Train Data:")
logging.info(" - correct the datetime format in Train dataset...")
self.data["train_df"]["week_start_date"] = pd.to_datetime(self.data["train_df"]["week_start_date"])
self.data["train_df"] = self.data["train_df"].set_index("week_start_date")
for col in ["year", "weekofyear"]:
self.data["train_df"][col] = self.data["train_df"][col].astype(int)
logging.info(" - select 'sj' city data...")
self.data["sj_train_df"] = self.data["train_df"][self.data["train_df"]["city"]=="sj"]
self.data["sj_train_df"].reset_index(drop=True, inplace=True)
logging.info(" - fix incorrect maximum 'weekofyear' feature...")
self.data["sj_train_df"] = self.process_inconsistent(train=True)
logging.info(" - missing values imputation...")
self.data["sj_train_df"] = self.process_missing_values(train=True)
logging.info(" - outliers removal...")
self.data["sj_train_df"] = self.process_outliers(train=True)
logging.info("------------------------------------------------------- Done processing model data ------------------------------------------------------")
def process_inconsistent(self, train=True):
if train:
sj_df = self.data["sj_train_df"]
else:
sj_df = self.data["sj_test_df"]
logging.info(" - correct 'weekofyear' column where some year has maximum week number of '53' which are not right, will be corrected to '52'...")
for year in [2001, 2007, 2013]:
sj_df.loc[:,'weekofyear'] = np.where(sj_df["year"]==year, sj_df["weekofyear"]+1, sj_df["weekofyear"])
sj_df.loc[:,'weekofyear'] = np.where(sj_df["weekofyear"]>52, 1, sj_df["weekofyear"])
return sj_df
def process_missing_values(self, train=True):
if train:
sj_df = self.data["sj_train_df"]
else:
sj_df = self.data["sj_test_df"]
logging.info(" - using KNN Imputation model with 'n_neighbors'=5'...")
imputer = KNNImputer(n_neighbors=5)
num_sj_df = sj_df.select_dtypes(include=[np.number, "float64", "int64"])
num_sj_df = pd.DataFrame(imputer.fit_transform(num_sj_df), columns=num_sj_df.columns)
for col in num_sj_df.columns:
sj_df[col] = num_sj_df[col]
return sj_df
def iqr_based_filtering(self, df, col):
Q1 = df[col].quantile(0.25)
Q3 = df[col].quantile(0.75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
df = df[(df[col]<lower_bound) |(df[col]>upper_bound)]
return df
def process_outliers(self, train=True):
if train:
sj_df = self.data["sj_train_df"]
else:
sj_df = self.data["sj_test_df"]
logging.info(" - using IQR based filtering to handle outliers...")
for col in self.ANALYSIS_CONFIG["OUTLIERS_COLS"]:
sj_outlier_removed_df = self.iqr_based_filtering(sj_df, col)
ol_col_list = ["city", "year"] + self.ANALYSIS_CONFIG["OUTLIERS_COLS"]
sub_sj_outlier_removed_df = sj_outlier_removed_df[ol_col_list]
merge_sj_outlier_removed_df = sj_df.merge(sub_sj_outlier_removed_df,
on=["city", "year"],
how="left",
suffixes=["", "_src"],
indicator="_join_ind")
for col in self.ANALYSIS_CONFIG["OUTLIERS_COLS"]:
sj_df.loc[merge_sj_outlier_removed_df["_join_ind"]=="both",col] = merge_sj_outlier_removed_df[col+"_src"]
return sj_df
class Train(Analysis):
data = {}
def __init__(self, target_var, train_df):
super().__init__()
self.data["model_df"] = train_df
self.meta = dict(
target_var = target_var,
stime = datetime.now(),
user = getpass.getuser(),
sys = uname()[1],
py = '.'.join(map(str, sys.version_info[:3])),
)
self.REGRESSION_MODELS = dict(
LINEAR = dict(alg=LinearRegression()),
RFR = dict(alg=RandomForestRegressor(), args=dict(randome_state=42, scaled=False),
param_grid={
"max_depth" : [None, 5, 10, 20, 35],
"max_features" : [2, 5, "auto"],
# "min_samples_leaf" : [2, 3, 4, 10],
"n_estimators" : [20, 50, 100, 200],
}),
XGBOOST = dict(alg=XGBRegressor(), args=dict(random_state=42, scaled=False),
param_grid={
"learning_rate":[0.01, 0.05, 0.1, 0.3],
"max_depth": [2, 3, 6, 10], # 3
"n_estimators": [20, 50, 200], # 100
}),
GRADIENT = dict(alg=GradientBoostingRegressor(), args=dict(random_state=42),
param_grid={
"n_estimators": [100, 150, 200, 400],
"learning_rate": [0.03, 0.1, 0.3],
'max_depth': [2, 4, 5, 6, 8],
}),
BAGGING = dict(alg=BaggingClassifier(), args=dict(random_state=42),
param_grid={
"n_estimators": [10, 30, 50, 100],
"max_features": [1, 5, 20, 100],
'max_samples': [1, 5, 20, 100],
}),
)
def get_model_data(self):
logging.info("------------------------------------------------------------- MODELLING -------------------------------------------------------------")
logging.info("Reading Processed Model Dataset:")
logging.info("Initiate Feature Engineering:")
logging.info(" - create dengue season categorical features...")
self.data["sj_train_df"] = self.data["model_df"]
cutoffs = [11,30]
self.data["sj_train_df"]['low_season'] = np.where((self.data["sj_train_df"].weekofyear<cutoffs[0]), 1, 0)
self.data["sj_train_df"]['rampup_season'] = np.where((self.data["sj_train_df"].weekofyear>=cutoffs[0]) &
(self.data["sj_train_df"].weekofyear<cutoffs[1]), 1, 0)
self.data["sj_train_df"]['high_season'] = np.where((self.data["sj_train_df"].weekofyear>=cutoffs[1]), 1, 0)
logging.info(" - create lag features of temperature and humidity...")
to_shift = ["reanalysis_specific_humidity_g_per_kg", "reanalysis_dew_point_temp_k", "reanalysis_min_air_temp_k",
"reanalysis_max_air_temp_k", "station_min_temp_c", "station_max_temp_c",
"reanalysis_air_temp_k", "reanalysis_relative_humidity_percent"]
for i in to_shift:
self.data["sj_train_df"][i+"_1lag"] = self.data["sj_train_df"][i].shift(-1)
self.data["sj_train_df"][i+"_2lag"] = self.data["sj_train_df"][i].shift(-2)
self.data["sj_train_df"][i+"_3lag"] = self.data["sj_train_df"][i].shift(-3)
self.data["sj_train_df"] = self.data["sj_train_df"].fillna(method="ffill")
logging.info("1. Run Base Dengue Prediction Model Without Feature Engineering & Feature Selection:")
logging.info(" - split the data into Train & Test data with original features...")
self.base_sj_X_train, self.base_sj_X_test, self.base_sj_y_train, self.base_sj_y_test = self.split_data(self.data["sj_train_df"],
input_cols=self.MODELLING_CONFIG["TRAIN_COLS"],
target="total_cases",
ratio=0.20)
self.data["base_sj_predict_df"] = pd.DataFrame()
self.base_sj_metrics_list = []
self.base_sj_model = []
for model_name in self.REGRESSION_MODELS:
if model_name == "LINEAR":
model, predict, predict_series, metrics = self.run_model(self.base_sj_X_train, self.base_sj_X_test,
self.base_sj_y_train, self.base_sj_y_test,
model_name,
tuning_method=None)
else:
model, predict, predict_series, metrics = self.run_model(self.base_sj_X_train, self.base_sj_X_test,
self.base_sj_y_train, self.base_sj_y_test,
model_name,
tuning_method=self.MODELLING_CONFIG["TUNING_METHOD"])
self.data["base_sj_predict_df"][f"{model_name}_total_cases"] = predict
self.base_sj_metrics_list.append(metrics)
self.base_sj_model.append(model)
self.data["base_sj_predict_df"].index = predict_series.index
self.data["base_sj_predict_df"]["y_test"] = self.base_sj_y_test
self.data["base_sj_predict_df"].reset_index(inplace=True)
self.data["base_sj_metrics_df"] = pd.DataFrame(self.base_sj_metrics_list)
self.data["base_sj_model"] = pd.DataFrame(self.base_sj_model).rename(columns={0: "Algorithm"})
logging.info("2. Run Feature Engineering Dengue Prediction Model:")
logging.info(" - split the data into Train & Test data with original & new features...")
self.fe_sj_X_train, self.fe_sj_X_test, self.fe_sj_y_train, self.fe_sj_y_test = self.split_data(self.data["sj_train_df"],
input_cols=self.MODELLING_CONFIG["TRAIN_COLS"] + self.MODELLING_CONFIG["FEATURE_ENGINEER_COLS"],
target="total_cases",
ratio=0.20)
self.data["fe_sj_predict_df"] = pd.DataFrame()
self.fe_sj_metrics_list = []
self.fe_sj_model = []
for model_name in self.REGRESSION_MODELS:
if model_name == "LINEAR":
model, predict, predict_series, metrics = self.run_model(self.fe_sj_X_train, self.fe_sj_X_test,
self.fe_sj_y_train, self.fe_sj_y_test,
model_name,
tuning_method=None)
else:
model, predict, predict_series, metrics = self.run_model(self.fe_sj_X_train, self.fe_sj_X_test,
self.fe_sj_y_train, self.fe_sj_y_test,
model_name,
tuning_method=self.MODELLING_CONFIG["TUNING_METHOD"])
self.data["fe_sj_predict_df"][f"{model_name}_total_cases"] = predict
self.fe_sj_metrics_list.append(metrics)
self.fe_sj_model.append(model)
self.data["fe_sj_predict_df"].index = predict_series.index
self.data["fe_sj_predict_df"]["y_test"] = self.fe_sj_y_test
self.data["fe_sj_predict_df"].reset_index(inplace=True)
self.data["fe_sj_metrics_df"] = pd.DataFrame(self.fe_sj_metrics_list)
self.data["fe_sj_model"] = pd.DataFrame(self.fe_sj_model).rename(columns={0: "Algorithm"})
logging.info("3. Run Feature Selection Dengue Prediction Model:")
self.sj_top_20_features, self.data["sj_score_table"] = self.feature_selection_scores(self.base_sj_X_train, self.base_sj_y_train)
logging.info(" - split the data into Train & Test data using selected features...")
self.fs_sj_X_train, self.fs_sj_X_test, self.fs_sj_y_train, self.fs_sj_y_test = self.split_data(self.data["sj_train_df"],
input_cols=self.sj_top_20_features,
target="total_cases",
ratio=0.20)
self.data["fs_sj_predict_df"] = pd.DataFrame()
self.fs_sj_metrics_list = []
self.fs_sj_model = []
for model_name in self.REGRESSION_MODELS:
if model_name == "LINEAR":
model, predict, predict_series, metrics = self.run_model(self.fs_sj_X_train, self.fs_sj_X_test,
self.fs_sj_y_train, self.fs_sj_y_test,
model_name,
tuning_method=None)
else:
model, predict, predict_series, metrics = self.run_model(self.fs_sj_X_train, self.fs_sj_X_test,
self.fs_sj_y_train, self.fs_sj_y_test,
model_name,
tuning_method=self.MODELLING_CONFIG["TUNING_METHOD"])
self.data["fs_sj_predict_df"][f"{model_name}_total_cases"] = predict
self.fs_sj_metrics_list.append(metrics)
self.fs_sj_model.append(model)
self.data["fs_sj_predict_df"].index = predict_series.index
self.data["fs_sj_predict_df"]["y_test"] = self.fs_sj_y_test
self.data["fs_sj_predict_df"].reset_index(inplace=True)
self.data["fs_sj_metrics_df"] = pd.DataFrame(self.fs_sj_metrics_list)
self.data["fs_sj_model"] = pd.DataFrame(self.fs_sj_model).rename(columns={0: "Algorithm"})
logging.info("---------------------- Done modelling using LINEAR, RANDOM FOREST, XGBOOST & GRADIENT BOOSTING on 'total_cases' ----------------------")
def split_data(self, df, input_cols=[], target="total_cases", ratio=0.30):
X = df[input_cols]
y = df[target]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=ratio, random_state=42)
return X_train, X_test, y_train, y_test
@staticmethod
def root_mean_square_error(actual, pred):
rmse = math.sqrt(mean_absolute_error(actual, pred))
return rmse
def evaluate(self, actual, pred):
R2 = r2_score(actual, pred)
MAE = mean_absolute_error(actual, pred)
RMSE = self.root_mean_square_error(actual, pred)
metrics = dict(MAE=MAE, RMSE=RMSE, R2_Score=R2)
return metrics
def run_model(self, x_train, x_test, y_train, y_test, model_name, tuning_method):
if model_name == "LINEAR":
model_type = self.REGRESSION_MODELS["LINEAR"]
alg = model_type["alg"]
elif model_name == "RFR":
model_type = self.REGRESSION_MODELS["RFR"]
alg = model_type["alg"]
elif model_name == "XGBOOST":
model_type = self.REGRESSION_MODELS["XGBOOST"]
alg = model_type["alg"]
elif model_name == "GRADIENT":
model_type = self.REGRESSION_MODELS["GRADIENT"]
alg = model_type["alg"]
elif model_name == "BAGGING":
model_type = self.REGRESSION_MODELS["BAGGING"]
alg = model_type["alg"]
if tuning_method == None:
model = alg
elif tuning_method == "grid_search":
alg_tuned = GridSearchCV(estimator=alg,
param_grid=model_type["param_grid"],
cv=5,
verbose=0)
elif tuning_method == "random_search":
alg_tuned = RandomizedSearchCV(estimator=alg,
param_distributions=model_type["param_grid"],
cv=5,
verbose=0)
if tuning_method == None:
model.fit(x_train, y_train)
else:
alg_tuned.fit(x_train, y_train)
model = alg.set_params(**alg_tuned.best_params_)
model.fit(x_train, y_train)
predict = model.predict(x_test)
predict_series = pd.Series(predict, index=y_test.index)
metrics = self.evaluate(y_test, predict)
metrics["MODEL"] = model_name
return model, predict, predict_series, metrics
def random_forest_selection(self, x, y):
alg = RandomForestRegressor()
alg.fit(x, y)
preds = alg.predict(x)
accuracy = r2_score(preds, y)
rf_fi = pd.DataFrame(alg.feature_importances_, columns=["RF"], index=x.columns)
rf_fi = rf_fi.reset_index().sort_values(['RF'],ascending=0)
return rf_fi
def extratrees_selection(self, x, y):
alg = ExtraTreesRegressor()
alg.fit(x, y)
extratrees_fi = pd.DataFrame(alg.feature_importances_, columns=["Extratrees"], index=x.columns)
extratrees_fi = extratrees_fi.reset_index().sort_values(['Extratrees'],ascending=0)
return extratrees_fi
def kbest_selection(self, x, y):
model = SelectKBest(score_func=chi2, k=5)
alg = model.fit(x.abs(), y)
pd.options.display.float_format = '{:.2f}'.format
kbest_fi = pd.DataFrame(alg.scores_, columns=["Kbest"], index=x.columns)
kbest_fi = kbest_fi.reset_index().sort_values('Kbest',ascending=0)
return kbest_fi
def feature_selection_scores(self, x, y):
try:
logging.info("- feature selection through Random Forest Regressor...")
rf_fi = self.random_forest_selection(x, y)
except MemoryError:
print("- feature selection through Random Forest Regressor not run due to laptop memory issue...")
logging.info("- feature selection through Extratrees Regressor...")
extratrees_fi = self.extratrees_selection(x, y)
logging.info("- feature selection through K-Best...")
kbest_fi = self.kbest_selection(x, y)
logging.info("Creating feature selection table to acquire the right features")
dfs = [rf_fi, extratrees_fi, kbest_fi]
features_final_results = reduce(lambda left,right: | pd.merge(left, right, on='index') | pandas.merge |
from typing import Dict, List, Sequence
from whoosh import index
from whoosh.index import create_in, EmptyIndexError
from whoosh.fields import *
from whoosh.qparser import MultifieldParser
from whoosh.filedb.filestore import RamStorage, FileStorage, copy_to_ram
from whoosh.analysis import StemmingAnalyzer
import json
# import pandas as pd
import sys
import sqlite3
import pandas as pd
data_path = 'data/fa22_courses.db'
load_classes_query = 'SELECT subject,number,name,credit_hours,label,description,gpa,yearterm,degree_attributes FROM classes '
# data_path = 'data/class_data.db'
# code from https://github.com/darenr/python-whoosh-simple-example/blob/master/example.py
class SearchEngine:
def __init__(self, schema, docs=None):
self.schema = schema
schema.add('raw', TEXT(stored=True))
# Load index from file storage
# try loading index. IF unsuccessful, load index into Ram storage.
try:
storage = copy_to_ram(FileStorage('data/index'))
self.ix = storage.open_index()
except EmptyIndexError:
docs = pd.DataFrame()
with sqlite3.connect(data_path) as conn:
# Use sql query to load docs into pandas dataframe.
docs = | pd.read_sql(load_classes_query, conn) | pandas.read_sql |
#!/usr/bin/env python
# coding: utf8
'''Script to parse individual annotator responses into a well-formed CSV file.
Example
-------
$ ./scripts/parse_individual_responses.py \
"path/to/dir/*.csv" \
openmic-2018-individual-responses.csv
'''
from __future__ import print_function
import argparse
import glob
import json
import os
import pandas as pd
import sys
import tqdm
import uuid
import warnings
YN_MAP = {'no': 0, 'yes': 1}
OUTPUT_COLUMNS = ['sample_key', 'worker_id', 'worker_trust',
'channel', 'instrument', 'response']
CONTAIN_COL = 'does_this_recording_contain_{}'
def parse_one(row):
'''
Parameters
----------
row : pd.Series
Series record with at least the following fields:
[_channel, _trust, _worker_id, instrument, sample_key,
does_this_recording_contain_{instrument}]
Returns
-------
resp : dict
Object with the following fields:
[sample_key, worker_id, worker_trust, channel, instrument, response]
'''
response = YN_MAP.get(row[CONTAIN_COL.format(row.instrument)])
if response is None:
warnings.warn("Null response: {}".format(row.tolist()))
return dict(sample_key=row.sample_key, worker_id=row._worker_id,
worker_trust=row._trust, channel=row._channel,
instrument=row.instrument, response=response)
def encrypt_field(values, hashlen=8, retries=5):
unique_values = set(values)
for n in range(retries):
hashmap = {val: str(uuid.uuid4()).replace('-', '')[:hashlen]
for val in list(unique_values)}
unique_hashes = set(hashmap.values())
if len(unique_hashes) == len(unique_values):
# No collisions, we're done here.
break
if len(unique_hashes) != len(unique_values):
raise ValueError('hashlen={} has caused collisions, try increasing.'
.format(hashlen))
print('Encrypted {} unique values'.format(len(hashmap)))
return list(map(hashmap.get, values)), hashmap
def main(csv_files, output_filename, hashfile=None):
records = []
for csv_file in tqdm.tqdm(csv_files):
records += pd.read_csv(csv_file).apply(parse_one, axis=1).values.tolist()
df = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
import datetime
import pandas as pd
import numpy as np
import dtcj
def load_merged(trans=None, prod=None) -> pd.DataFrame():
if not trans:
trans = | pd.read_csv('data/transaction_data.csv') | pandas.read_csv |
# from glob import glob
from pathlib import Path
from numpy import nan
from functools import reduce
from typing import List, Dict, Union
import pandas as pd
import pandas.io.formats.excel
pandas.io.formats.excel.ExcelFormatter.header_style = None
class CollateScores:
"""Collate all scoresheets in a given folder into a Consolidated marks spreadsheet."""
def __init__(self, scoresheets_path: Path, out_filename: str):
self.scoresheets_path: Path = Path(scoresheets_path)
self.out_filename: Path = self.scoresheets_path / f'{out_filename}.xlsx'
self.all_dfs: List[pd.DataFrame] = []
self.all_papers_scores: List[pd.DataFrame] = []
self.all_papers_diving: List[pd.DataFrame] = []
def group_all_scoresheets(self) -> List[pd.DataFrame]:
grouped_dfs = []
for scoresheet in self.scoresheets_path.glob('*GROUP*.xlsx'):
print(scoresheet.name)
JS = JudgeScores(scoresheet)
JS.get_judge()
JS.read_scores()
modified_scores = JS.calculate_formulas()
# modified_scores = JS.diving_scores()
# ToDo:
# - separate func calls
# - concat formulas and store in self.judge_calculations
output = modified_scores
grouped_dfs.append(output)
return grouped_dfs
def merge_group_scores(self, group_frames: List[pd.DataFrame]) -> pd.DataFrame:
# get unique group values
cols = JudgeScores.data_columns[:-1] # ['ID', 'Ref', 'Paper']
merged_group = reduce(
lambda left, right: pd.merge(
left, right.drop(cols, axis=1),
left_index=True,
right_index=True
), group_frames
)
def reduce_list(a: List[str], b: List[str]) -> List[str]:
return list(set(a)-set(b))
# get only judge score columns by removing ID, Ref and Paper
judge_score_cols = reduce_list(merged_group.columns, cols)
jsc = merged_group[judge_score_cols]
# apply group level formulas and create columns
merged_group['GroupAverageScore'] = jsc.mean(axis=1)
diving_style_formula = \
(jsc.sum(axis=1) -
(jsc.min(axis=1) + jsc.max(axis=1))) / \
(jsc.count(axis=1) - 2)
merged_group['GroupDivingStyle'] = diving_style_formula
def split_diving_style() -> None:
# get only group score cols
group_score_columns = reduce_list(
merged_group.columns, judge_score_cols)
gsc = merged_group[group_score_columns]
# split scores and diving style
rows = len(gsc.index)
split = int(rows / 2)
non_diving = gsc.iloc[:split, :]
diving = gsc.iloc[split:, :]
# append to separate lists to merge and rank separately
self.all_papers_scores.append(non_diving)
self.all_papers_diving.append(diving)
split_diving_style()
return merged_group
@staticmethod
def rank_scored_papers(pscores) -> pd.DataFrame:
apsc = pd.concat(pscores, axis='index')
apsc['Rank'] = apsc['GroupAverageScore'].rank(ascending=False)
apsc['DivingRank'] = apsc['GroupDivingStyle'].rank(ascending=False)
apsc.sort_values('Rank', ascending=True, inplace=True)
return apsc
def concatenate_shortlist(self) -> pd.DataFrame:
straight = self.rank_scored_papers(self.all_papers_scores)
diving = self.rank_scored_papers(self.all_papers_diving)
col_order = ['GroupDivingStyle', 'DivingRank',
'GroupAverageScore', 'ID', 'Paper', 'Ref', 'Rank']
# change col order and sort if straight DivingStyle calculates correctly
if not straight['DivingRank'].isnull().values.any():
straight.sort_values('DivingRank', ascending=True, inplace=True)
alt_order = ['GroupAverageScore', 'Rank',
'GroupDivingStyle', 'ID', 'Paper', 'Ref', 'DivingRank']
straight = straight[alt_order]
else:
straight = straight[col_order]
col_order.reverse()
diving = diving[col_order]
diving.reset_index(drop=True, inplace=True)
straight.reset_index(drop=True, inplace=True)
diving.add_suffix('_alt')
return pd.concat([straight, diving], axis=1)
@staticmethod
def format_scores(wkb, wks) -> None:
wks.set_column('A:B', 14)
wks.set_column('C:C', 55) # set papers col widest
wks.set_column('D:Z', 18)
header_format = wkb.add_format(
{'bold': True, 'bg_color': '#000000', 'font_color': '#ffffff'})
scores_format = wkb.add_format({'bg_color': '#C6EFCE'})
wks.conditional_format(
'A1:Z1', {'type': 'no_blanks', 'format': header_format}
)
wks.conditional_format(
'D2:Z200', {'type': 'no_blanks', 'format': scores_format}
)
def write_scores(self, n: int, writer: pd.ExcelWriter) -> str:
sh = f"Group {n}"
# filter dataframes with keys matching group number
frames = list(filter(lambda fr: list(fr.keys())[0] == n, self.all_dfs))
group_scores = [list(frm.values())[0] for frm in frames]
merged_scores = self.merge_group_scores(group_scores)
merged_scores.to_excel(writer, sheet_name=sh, index=False)
return sh
@staticmethod
def format_shortlist(wkb, wks) -> None:
pass
# TODO:
# - add width to papers column
# - add conditional format to top 20 of each Ref col
def write_shortlist(self, writer: pd.ExcelWriter) -> str:
shortlist_name = 'Shortlist calculation'
shortlist = self.concatenate_shortlist()
shortlist.to_excel(
writer, sheet_name=shortlist_name, index=False)
return shortlist_name
def write_consolidated_marks(self) -> None:
# make a list of unique group numbers
groups = list(set(list(frm.keys())[0] for frm in self.all_dfs))
with pd.ExcelWriter(self.out_filename) as xlwriter:
workbook = xlwriter.book
for num in groups:
sheetname = self.write_scores(num, xlwriter)
print(sheetname)
self.format_scores(workbook, xlwriter.sheets[sheetname])
shortlist_sheet = self.write_shortlist(xlwriter)
self.format_shortlist(workbook, xlwriter.sheets[shortlist_sheet])
def __call__(self) -> Path:
self.all_dfs = self.group_all_scoresheets()
self.write_consolidated_marks()
return self.out_filename
class JudgeScores:
"""Read score data, judge details and calculations based on scores from scoresheet."""
data_columns = ['ID', 'Ref', 'Paper', 'Score']
def __init__(self, scoresheet: Path):
self.scoresheet = Path(scoresheet)
self.judge_scores = None
self.judge: Dict[str, Union[str, int]] = {}
def get_judge(self):
"""Get the judges name, group and category from the scoresheet."""
info = self.scoresheet.stem.split(' - ')
info_items = len(info)
keys = ['Judge', 'Category', 'Group']
if info_items == 3:
judge_info = dict(zip(keys, info))
elif info_items == 2:
judge_info = dict(zip(keys, [info[0], None, info[1]]))
else:
print(f'Incorrect filename info from {self.scoresheet.name}', info)
judge_info = None
if judge_info:
try:
# Extract group integer from end of filename
judge_info['Group'] = int(''.join(
filter(str.isdigit, judge_info['Group'])))
self.judge.update(judge_info)
except TypeError as e:
raise e
return judge_info
def read_scores(self):
"""Read the scores from the scoresheet and return a dataframe."""
df = | pd.read_excel(self.scoresheet, index_col=None, header=None) | pandas.read_excel |
import os
from glob import glob
import numpy as np
import pandas as pd
import re
from .utils import data_table
from neslter.parsing.files import Resolver
from neslter.parsing.ctd.hdr import compile_hdr_files
from neslter.parsing.underway import Underway
# keys
INSTRUMENT = 'Instrument'
ACTION = 'Action'
COMMENT = 'Comment'
STATION = 'Station'
CAST = 'Cast'
DATETIME = 'dateTime8601'
LAT='Latitude'
LON='Longitude'
MESSAGE_ID = 'Message ID'
RECOVER_ACTION = 'recover'
EN608='en608'
EN617='en617'
IMPELLER_PUMP = 'impeller pump'
DIAPHRAGM_PUMP = 'diaphragm pump'
INCUBATION = 'Incubation'
PUMP_TYPE = 'pump_type'
TOI_DISCRETE = 'TOI discrete'
UNDERWAY_SCIENCE_SEAWATER = 'Underway Science seawater'
USS_IMPELLER = 'Underway Science seawater impeller'
USS_DIAPHRAGM = 'Underway Science seawater diaphragm pump'
CTD_INSTRUMENT = 'CTD911'
ELOG_COLUMNS = [MESSAGE_ID, DATETIME, INSTRUMENT, ACTION, STATION, CAST, LAT, LON, COMMENT]
COLUMNS_WO_MESSAGE_ID = [DATETIME, INSTRUMENT, ACTION, STATION, CAST, LAT, LON, COMMENT]
def elog_path(cruise):
elog_dir = Resolver().raw_directory('elog', cruise)
candidates = glob(os.path.join(elog_dir, 'R2R_ELOG_*_FINAL_EVENTLOG*.csv'))
assert len(candidates) == 1, 'cannot find event log at {}'.format(elog_dir)
return candidates[0]
def hdr_path(cruise):
try:
return Resolver().raw_directory('ctd', cruise)
except KeyError:
return None
def sidecar_file_path(cruise, filename):
try:
elog_dir = Resolver().raw_directory('elog', cruise)
except KeyError:
return None
path = os.path.join(elog_dir, filename)
if not os.path.exists(path):
return None
return path
def toi_path(cruise):
filename = '{}_TOI_underwaysampletimes.txt'.format(cruise.capitalize())
return sidecar_file_path(cruise, filename)
def corrections_path(cruise):
filename = 'R2R_ELOG_{}_corrections.xlsx'.format(cruise)
return sidecar_file_path(cruise, filename)
def additions_path(cruise):
filename = 'R2R_ELOG_{}_additions.xlsx'.format(cruise)
return sidecar_file_path(cruise, filename)
class EventLog(object):
def __init__(self, cruise):
self.cruise = cruise
self.parse(cruise)
def parse(self, cruise):
ep = elog_path(cruise)
self.df = parse_elog(ep)
corr_path = corrections_path(cruise)
if corr_path is not None:
self.apply_corrections(corr_path)
addns_path = additions_path(cruise)
if addns_path is not None:
self.apply_additions(addns_path)
hdr_dir = hdr_path(cruise)
tp = toi_path(cruise)
if tp is not None:
self.remove_action(TOI_DISCRETE)
self.add_events(clean_toi_discrete(tp))
if hdr_dir is not None:
self.add_ctd_deployments(hdr_dir)
self.add_underway_locations()
self.fix_incubation_cast_numbers()
def add_events(self, events):
self.df = pd.concat([self.df, events], sort=True).sort_values(DATETIME)
def remove_recover_events(self, instrument):
self.remove_action(RECOVER_ACTION, instrument)
def remove_instrument(self, instrument):
self.df = self.df[~(self.df[INSTRUMENT] == instrument)]
def remove_action(self, action, instrument=None):
new_df = self.df.copy()
if instrument is not None:
new_df = new_df[~((new_df[ACTION] == action) & (new_df[INSTRUMENT] == instrument))]
else:
new_df = new_df[new_df[ACTION] != action]
self.df = new_df
def remove_ctd_recoveries(self):
self.remove_recover_events(CTD_INSTRUMENT)
def fix_incubation_cast_numbers(self):
df = self.df.copy()
slic = (df[INSTRUMENT] == INCUBATION) & ~(df[CAST].isna())
df.loc[slic, CAST] = df.loc[slic, CAST].astype('str').str.replace('C','').astype(int)
self.df = df
def add_ctd_deployments(self, hdr_dir):
self.remove_ctd_recoveries()
hdr = self.parse_ctd_hdrs(hdr_dir)
self.remove_instrument(CTD_INSTRUMENT)
self.add_events(hdr)
def apply_corrections(self, corr_path):
corr = pd.read_excel(corr_path)
corr[DATETIME] = pd.to_datetime(corr[DATETIME], utc=True)
corr.pop('Instrument')
corr.pop('Action')
merged = self.df.merge(corr, on=MESSAGE_ID, how='left')
DATETIME_X = '{}_x'.format(DATETIME)
DATETIME_Y = '{}_y'.format(DATETIME)
merged[DATETIME] = pd.to_datetime(merged[DATETIME_Y].combine_first(merged[DATETIME_X]), utc=True)
self.df = merged
def apply_additions(self, addns_path):
addns = pd.read_excel(addns_path)
addns[DATETIME] = | pd.to_datetime(addns[DATETIME], utc=True) | pandas.to_datetime |
"""
This script runs time-series forecasting via ARIMA. It contains all the methods necessary
to simulate a time series forecasting task. Below simulation uses daily minimum temp. and tries
to predict min. temp. for given date.
Steps to follow:
1. Pre-processing raw data. `preprocess_raw_data()`
2. Splitting data into two as train and test. `split_data(2)`
3. Fit model to the train dataset and save model object. `fit_and_save_model()`
4. Make prediction for test datasets. `predict_test_wt_arima()`
5. Measure the accuracy of predictions for test period. Afterwards save it
to the local. `measure_accuracy()`
6. Use forecast function to have a point estimate for a given date.
`forecast_wt_arima_for_date(input_date)`
What is ARIMA?
ARIMA is the most common method used in time series forecasting. It is an acronym for
AutoregRessive Integrated Moving Average. ARIMA is a model that can be fitted to time series data
in order to better understand or predict future points in the series.
Details of the dataset:
This dataset describes the minimum daily temperatures over 10 years (1981-1990)
in the city Melbourne, Australia.
The units are in degrees Celsius and there are 3650 observations.
The source of the data is credited as the Australian Bureau of Meteorology
"""
import logging
import math
import os
import pandas as pd
import pickle
import sqlite3
from pmdarima import auto_arima
from datetime import date
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def run_training():
#TODO
preprocess_raw_data()
split_data(2)
fit_and_save_model()
predict_test_wt_arima()
measure_accuracy()
def run_prediction():
#TODO
forecast_wt_arima_for_date(str(date.today()))
def read_data(df_phase):
"""
This function reads necessary data from local for the steps of the simulation.
:param df_phase: Read data for which step of the simulation.
Options: ['raw_data', 'processed', 'train_model', 'test_model', 'train_predicted',
'test_predicted']
:return: DataFrame read from local.
"""
if df_phase == 'raw_data':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'raw_data', 'daily_minimum_temp.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'processed':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'processed_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'train_model':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'train_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'test_model':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'test_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'test_predicted':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'predicted_test.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
return df
def preprocess_raw_data():
"""
Reads raw data from local and makes pre-processing necessary to use dataset with ARIMA.
Function assumes that the date column is named as 'Date'. It saves prep-processed dataset
the local.
"""
raw_df = read_data('raw_data')
raw_df['Date'] = list(map(lambda x: pd.to_datetime(x), raw_df['Date']))
raw_df = raw_df.sort_values('Date')
procesed_df = raw_df.rename(index=str,
columns={'Daily minimum temperatures in Melbourne, '
'Australia, 1981-1990': 'y'})
for sub in procesed_df['y']:
if '?' in sub:
procesed_df.loc[procesed_df['y'] == sub, 'y'] = sub.split('?')[1]
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'processed_df.csv')
os.makedirs(os.path.dirname(data_path), exist_ok=True)
procesed_df.to_csv(path_or_buf=data_path, index=False, header=True)
def split_data(n_weeks_to_test=2):
"""
Reads preprocessed data from local and splits it to test/train and saves it to
local. test_df.csv and train_df.csv can be found under `data/interim` path.
:param n_weeks_to_test: Number of weeks for the test data. Default is 2.
"""
preprocessed_data = read_data('processed')
n_days_for_test = n_weeks_to_test * 7
test_df = preprocessed_data[-n_days_for_test:]
train_df = preprocessed_data[:-n_days_for_test]
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join('data', 'interim')
os.makedirs(os.path.dirname(data_path), exist_ok=True)
combined_path_test = os.path.join(repo_path, data_path, 'test_df.csv')
combined_path_train = os.path.join(repo_path, data_path, 'train_df.csv')
train_df.to_csv(path_or_buf=combined_path_train, index=False, header=True)
test_df.to_csv(path_or_buf=combined_path_test, index=False, header=True)
def fit_and_save_model():
"""
Runs Prophet for the train dataframe. It reads data from local and saves the model
object to the local. Model can be found under `data/model/arima.pkl`
"""
train_df = read_data('train_model')
train_df['Date'] = list(map(lambda x: pd.to_datetime(x), train_df['Date']))
train_df = train_df.set_index('Date')
model = auto_arima(train_df, start_p=1, start_q=1,
test='adf',
max_p=1, max_q=1, m=12,
start_P=0, seasonal=True,
d=None, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
model_path = os.path.join(repo_path, 'data', 'model', 'arima.pkl')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
with open(model_path, "wb") as f:
pickle.dump(model, f)
def predict_test_wt_arima():
"""
Reads test dataframe and model object from local and makes prediction.
Data with predicted values for test dataframe will be saved to local.
"""
test_df = read_data('test_model')
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
model_path = os.path.join(repo_path, 'data', 'model', 'arima.pkl')
with open(model_path, 'rb') as f:
model = pickle.load(f)
fitted, confint = model.predict(n_periods=len(test_df), return_conf_int=True)
predicted_test = pd.merge(
pd.DataFrame(fitted), pd.DataFrame(confint), right_index=True, left_index=True)
predicted_test = predicted_test.rename(index=str,
columns={'0_x': 'yhat',
'0_y': 'yhat_lower',
1: 'yhat_upper'})
data_path = os.path.join(repo_path, 'data', 'interim')
combined_path_test = os.path.join(data_path, 'predicted_test.csv')
predicted_test.to_csv(path_or_buf=combined_path_test, index=False, header=True)
def calculate_mape(y, yhat):
"""
Calculates Mean Average Percentage Error.
:param y: Actual values as series
:param yhat: Predicted values as series
:return: MAPE as percentage
"""
y = y.replace(0, np.nan)
error_daily = y - yhat
abs_daily_error = list(map(abs, error_daily))
relative_abs_daily_error = abs_daily_error / y
mape = (np.nansum(relative_abs_daily_error) / np.sum(~np.isnan(y)))*100
return mape
def calculate_rmse(y, yhat):
"""
Calculates Root Mean Square Error
:param y: Actual values as series
:param yhat: Predicted values as series
:return: RMSE value
"""
error_sqr = (y - yhat)**2
error_sqr_rooted = list(map(lambda x: math.sqrt(x), error_sqr))
rmse = sum(error_sqr_rooted) / len(error_sqr_rooted)
return rmse
def measure_accuracy():
"""
Uses the above defined accuracy metrics and calculates accuracy for both test series in
terms of MAPE and RMSE. Saves those results to local as a csv file.
:return: A dictionary with accuracy metrics for test dataset.
"""
test_df = read_data('test_model')
predicted_test = read_data('test_predicted')
mape_test = calculate_mape(test_df['y'], predicted_test['yhat'])
rmse_test = calculate_rmse(test_df['y'], predicted_test['yhat'])
days_in_test = len(test_df)
accuracy_dict = {'mape_test': [mape_test],
'rmse_test': [rmse_test],
'days_in_test': [days_in_test]}
acc_df = pd.DataFrame(accuracy_dict)
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
acc_path = os.path.join(repo_path, 'data', 'model', 'accuracy.csv')
acc_df.to_csv(path_or_buf=acc_path, index=False, header=True)
return acc_df.to_dict('index')[0]
def forecast_wt_arima_for_date(input_date):
"""
:param input_date: A date as a string in ISO format (yyyy-mm-dd).
:return: Dictionary with the forecasted values.
`yhat`: Forecasted value for given date.
`yhat_upper`: Forecasted upper value for given date & confidence intervals.
`yhat_lower`: Forecasted lower value for given date & confidence intervals.
"""
logging.info("Computing forecast for %s", input_date)
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
model_path = os.path.join(repo_path, 'data', 'model', 'arima.pkl')
test_df = read_data('test_model')
min_test_date = pd.to_datetime(test_df.index.min())
date_diff = pd.to_datetime(input_date) - min_test_date
with open(model_path, 'rb') as f:
model = pickle.load(f)
fitted, confint = model.predict(n_periods=date_diff.days, return_conf_int=True)
forecast_results = pd.merge(
pd.DataFrame(fitted), | pd.DataFrame(confint) | pandas.DataFrame |
import torch
import torch.optim as optim
import torch.nn as nn
from time import time
from os import path
from copy import copy, deepcopy
import pandas as pd
import numpy as np
import torch.nn.init as init
import os
class CrossValidationSplit:
"""A class to create training and validation sets for a k-fold cross validation"""
def __init__(self, dataset, cv=5, stratified=False, shuffle_diagnosis=False, val_prop=0.10):
"""
:param dataset: The dataset to use for the cross validation
:param cv: The number of folds to create
:param stratified: Boolean to choose if we have the same repartition of diagnosis in each fold
"""
self.stratified = stratified
subjects_df = dataset.subjects_df
if type(cv) is int and cv > 1:
if stratified:
n_diagnosis = len(dataset.diagnosis_code)
cohorts = np.unique(dataset.subjects_df.cohort.values)
preconcat_list = [[] for i in range(cv)]
for cohort in cohorts:
for diagnosis in range(n_diagnosis):
diagnosis_key = list(dataset.diagnosis_code)[diagnosis]
diagnosis_df = subjects_df[(subjects_df.diagnosis == diagnosis_key) &
(subjects_df.cohort == cohort)]
if shuffle_diagnosis:
diagnosis_df = diagnosis_df.sample(frac=1)
diagnosis_df.reset_index(drop=True, inplace=True)
for fold in range(cv):
preconcat_list[fold].append(diagnosis_df.iloc[int(fold * len(diagnosis_df) / cv):int((fold + 1) * len(diagnosis_df) / cv):])
folds_list = []
for fold in range(cv):
fold_df = pd.concat(preconcat_list[fold])
folds_list.append(fold_df)
else:
folds_list = []
for fold in range(cv):
folds_list.append(subjects_df.iloc[int(fold * len(subjects_df) / cv):int((fold + 1) * len(subjects_df) / cv):])
self.cv = cv
elif type(cv) is float and 0 < cv < 1:
if stratified:
n_diagnosis = len(dataset.diagnosis_code)
cohorts = np.unique(dataset.subjects_df.cohort.values)
train_list = []
validation_list = []
test_list = []
for cohort in cohorts:
for diagnosis in range(n_diagnosis):
diagnosis_key = list(dataset.diagnosis_code)[diagnosis]
diagnosis_df = subjects_df[(subjects_df.diagnosis == diagnosis_key) &
(subjects_df.cohort == cohort)]
if shuffle_diagnosis:
diagnosis_df = diagnosis_df.sample(frac=1)
diagnosis_df.reset_index(drop=True, inplace=True)
train_list.append(diagnosis_df.iloc[:int(len(diagnosis_df) * cv * (1-val_prop)):])
validation_list.append(diagnosis_df.iloc[int(len(diagnosis_df) * cv * (1-val_prop)):
int(len(diagnosis_df) * cv):])
test_list.append(diagnosis_df.iloc[int(len(diagnosis_df) * cv)::])
train_df = pd.concat(train_list)
validation_df = | pd.concat(validation_list) | pandas.concat |
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# # IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref
"""
unused
sys, imp,
"""
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
#from collections import OrderedDict
from hp.dict import MyOrderedDict as OrderedDict
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from hp.basic import OrderedSet
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
import hp.plot
import hp.basic
import hp.pd
import hp.oop
import hp.data
import fdmg.datos as datos
import matplotlib.pyplot as plt
import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
import udev.scripts
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initilized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
idx = pd.IndexSlice
class Fdmg( #flood damage model
hp.sel.Sel_controller, #no init
hp.dyno.Dyno_wrap, #add some empty containers
hp.plot.Plot_o, #build the label
hp.sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp.oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp.oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price', 'write_fdmg_sum_fly'])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indicies rather than column labels)
gis_area_max = 3500
#lsit of data o names expected on the fdmg tab
#state = 'na' #for tracking what flood aep is currently in the model
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signifiy first run
#===========================================================================
# debuggers
#===========================================================================
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
ca_ltail ='flat'
ca_rtail =2 #aep at which zero value is assumeed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = 0
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
'just keeping this on the fdmg for simplicitly.. no need for flood level heterogenieyt'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initilize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
'wsl_delta':0}) #update the rest attributes
#=======================================================================
# pre checks
#=======================================================================
self.check_pars() #check the data loaded on your tab
if not self.session._write_data:
self.write_fdmg_sum = False
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
if self.db_f:
if not self.model.__repr__() == self.__repr__():
raise IOError
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
def check_pars(self): #check your data pars
df_raw = self.session.pars_df_d['datos']
#=======================================================================
# check mandatory data objects
#=======================================================================
if not 'binv' in df_raw['name'].tolist():
raise IOError
#=======================================================================
# check optional data objects
#=======================================================================
fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
boolidx = df_raw['name'].isin(fdmg_tab_nl)
if not np.all(boolidx):
raise IOError #passed some unexpected data names
return
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
'this is the main loader that builds all teh children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from teh data tab
'todo: hard code these class types (rather than reading from teh control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
'initial call which only udpates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in self.fdmgo_d.keys():
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protectino from teh named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get teh columns to use for fdmg results
"""
This is setup to generate a unique set of ordered column names with this logic
take the damage types
add mandatory fields
add user provided fields
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
col_os = OrderedSet(self.dmg_types) #put
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage resulst frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
#clewan the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'hse_type', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
def load_pars_dfunc(self, df_raw=None): #build a df from the dfunc tab
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
dfunc_ecols = ['place_code','dmg_code','dfunc_type','anchor_ht_code']
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all')
df2 = df1.dropna(axis='index', how='all') #drop rows with all na
#column check
if not hp.pd.header_check(df2, dfunc_ecols, logger=logger):
raise IOError
#=======================================================================
# custom columns
#=======================================================================
df3 = df2.copy(deep=True)
df3['dmg_type'] = df3['place_code'] + df3['dmg_code']
df3['name'] = df3['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df3.columns:
boolidx = ~pd.isnull(df3['tailpath']) #get dfuncs with data requests
self.load_raw_dfunc(df3[boolidx])
df3 = df3.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns
#=======================================================================
# garage checking
#=======================================================================
boolidx = np.logical_and(df3['place_code'] == 'G', df3['dfunc_type'] == 'rfda')
if np.any(boolidx):
logger.error('got dfunc_type = rfda for a garage curve (no such thing)')
raise IOError
#=======================================================================
# get special lists
#=======================================================================
#dmg_types
self.dmg_types = df3['dmg_type'].tolist()
#damage codes
boolidx = df3['place_code'].str.contains('total')
self.dmg_codes = df3.loc[~boolidx, 'dmg_code'].unique().tolist()
#place_codes
place_codes = df3['place_code'].unique().tolist()
if 'total' in place_codes: place_codes.remove('total')
self.place_codes = place_codes
self.session.pars_df_d['dfunc'] = df3
logger.debug('dfunc_df with %s'%str(df3.shape))
#=======================================================================
# get slice for houses
#=======================================================================
#identify all the entries except total
boolidx = df3['place_code'] != 'total'
self.house_childmeta_df = df3[boolidx] #get this trim
"""
hp.pd.v(df3)
"""
def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo)
logger = self.logger.getChild('load_hse_geo')
#=======================================================================
# load and clean the pars
#=======================================================================
df_raw = hp.pd.load_xls_df(self.session.parspath,
sheetname = 'hse_geo', header = [0,1], logger = logger)
df = df_raw.dropna(how='all', axis = 'index')
self.session.pars_df_d['hse_geo'] = df
#=======================================================================
# build a blank starter for each house to fill
#=======================================================================
omdex = df.columns #get the original mdex
'probably a cleaner way of doing this'
lvl0_values = omdex.get_level_values(0).unique().tolist()
lvl1_values = omdex.get_level_values(1).unique().tolist()
lvl1_values.append('t')
newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['place_code','finish_code'])
geo_dxcol = pd.DataFrame(index = df.index, columns = newcols) #make the frame
self.geo_dxcol_blank = geo_dxcol
if self.db_f:
if np.any(pd.isnull(df)):
raise IOError
l = geo_dxcol.index.tolist()
if not l == [u'area', u'height', u'per', u'inta']:
raise IOError
return
def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs
logger = self.logger.getChild('load_raw_dfunc')
logger.debug('with df \'%s\''%(str(meta_df_raw.shape)))
d = dict() #empty container
meta_df = meta_df_raw.copy()
#=======================================================================
# loop through each row and load the data
#=======================================================================
for indx, row in meta_df.iterrows():
inpath = os.path.join(row['headpath'], row['tailpath'])
df = hp.pd.load_smart_df(inpath,
index_col =None,
logger = logger)
d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionaryu
logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), d.keys()))
self.dfunc_raw_d = d
return
def load_floods(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_floods')
logger.debug('setting floods df \n')
self.set_floods_df()
df = self.floods_df
logger.debug('raising floods \n')
d = self.raise_children_df(df, #build flood children
kid_class = Flood,
dup_sibs_f= True,
container = OrderedDict) #pass attributes from one tot eh next
#=======================================================================
# ordered by aep
#=======================================================================
fld_aep_od = OrderedDict()
for childname, childo in d.iteritems():
if hasattr(childo, 'ari'):
fld_aep_od[childo.ari] = childo
else: raise IOError
logger.info('raised and bundled %i floods by aep'%len(fld_aep_od))
self.fld_aep_od = fld_aep_od
return
def set_floods_df(self): #build the flood meta data
logger = self.logger.getChild('set_floods_df')
df_raw = self.session.pars_df_d['floods']
df1 = df_raw.sort_values('ari').reset_index(drop=True)
df1['ari'] = df1['ari'].astype(np.int)
#=======================================================================
# slice for debug set
#=======================================================================
if self.db_f & (not self.dbg_fld_cnt == 'all'):
#check that we even have enough to do the slicing
if len(df1) < 2:
logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all')
raise IOError
df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame
dbg_fld_cnt = int(self.dbg_fld_cnt)
logger.info('db_f=TRUE. selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1)))
#===================================================================
# try to pull out and add the 100yr
#===================================================================
try:
boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl
if not boolidx.sum() == 1:
logger.debug('failed to locate 1 flood')
raise IOError
df2 = df2.append(df1[boolidx]) #add this row to the end
df1 = df1[~boolidx] #slice out this row
dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1
dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set
logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt))
except:
logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl)
df2 = df1.copy()
#===================================================================
# build list of extreme (low/high) floods
#===================================================================
evn_cnt = 0
odd_cnt = 0
for cnt in range(0, dbg_fld_cnt, 1):
if cnt % 2 == 0: #evens. pull from front
idxr = evn_cnt
evn_cnt += 1
else: #odds. pull from end
idxr = len(df1) - odd_cnt - 1
odd_cnt += 1
logger.debug('pulling flood with indexer %i'%(idxr))
ser = df1.iloc[idxr, :] #make thsi slice
df2 = df2.append(ser) #append this to the end
#clean up
df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True)
logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist()))
if not len(df) == int(self.dbg_fld_cnt):
raise IOError
else:
df = df1.copy()
if not len(df) > 0: raise IOError
self.floods_df = df
return
def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab
#logger = self.logger.getChild('set_area_prot_lvl')
"""
TODO: Consider moving this onto the binv and making the binv dynamic...
Calls:
handles for flood_tbl_nm
"""
logger = self.logger.getChild('set_area_prot_lvl')
logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm)
#=======================================================================
# get data
#=======================================================================
ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object
ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl')
"""
hp.pd.v(binv_df)
type(df.iloc[:, 0])
"""
return True
def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data
logger = self.logger.getChild('set_fhr')
logger.debug('assigning for \'fhz\' and \'bfe\'')
#get the data for this fhr set
fhr_tbl_o = self.fdmgo_d['fhr_tbl']
try:
df = fhr_tbl_o.d[self.fhr_nm]
except:
if not self.fhr_nm in fhr_tbl_o.d.keys():
logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s'
%(self.fhr_nm, fhr_tbl_o.d.keys()))
raise IOError
#=======================================================================
# loop through each series and apply
#=======================================================================
"""
not the most generic way of handling this...
todo:
add generic method to the binv
can take ser or df
updates the childmeta_df if before init
updates the children if after init
"""
for hse_attn in ['fhz', 'bfe']:
ser = df[hse_attn]
if not self.session.state == 'init':
#=======================================================================
# tell teh binv to update its houses
#=======================================================================
self.binv.set_all_hse_atts(hse_attn, ser = ser)
else:
logger.debug('set column \'%s\' onto the binv_df'%hse_attn)
self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in teh binvdf
"""I dont like this
fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm)
fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)"""
return True
def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format
'kept this special syntax reader separate in case we want to change th eformat of the flood tables'
flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table
fld_aep_l = flood_pars_df.loc[:, 'ari'].values #drop the 2 values and convert to a list
return fld_aep_l
def run(self, **kwargs): #placeholder for simulation runs
logger = self.logger.getChild('run')
logger.debug('on run_cnt %i'%self.run_cnt)
self.run_cnt += 1
self.state='run'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not isinstance(self.outpath, basestring):
raise IOError
logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg')
logger.info('for run_cnt %i'%self.run_cnt)
self.calc_fld_set(**kwargs)
return
def setup_res_dxcol(self, #setup the results frame
fld_aep_l = None,
#dmg_type_list = 'all',
bid_l = None):
#=======================================================================
# defaults
#=======================================================================
if bid_l == None: bid_l = self.binv.bid_l
if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all teh keys from the dictionary
#if dmg_type_list=='all': dmg_type_list = self.dmg_types
#=======================================================================
# setup the dxind for writing
#=======================================================================
lvl0_values = fld_aep_l
lvl1_values = self.dmg_df_cols #include extra reporting columns
#fold these into a mdex (each flood_aep has all dmg_types)
columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['flood_aep','hse_atts'])
dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame
self.dmg_dx_base = dmg_dx.copy()
if self.db_f:
logger = self.logger.getChild('setup_res_dxcol')
if not self.beg_hist_df == False:
fld_aep_l.sort()
columns = pd.MultiIndex.from_product([fld_aep_l, ['bsmt_egrd', 'cond']],
names=['flood_aep','bsmt_egrd'])
self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns)
logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape))
else:
self.beg_hist_df = None
"""
dmg_dx.columns
"""
return
def calc_fld_set(self, #calc flood damage for the flood set
fld_aep_l = None, #list of flood aeps to calcluate
#dmg_type_list = 'all', #list of damage types to calculate
bid_l = None, #list of building names ot calculate
wsl_delta = None, #delta value to add to all wsl
wtf = None, #optinonal flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is used)
**run_fld): #kwargs to send to run_fld
'we could separate the object creation and the damage calculation'
"""
#=======================================================================
# INPUTS
#=======================================================================
fld_aep_l: list of floods to calc
this can be a custom list built by the user
extracted from the flood table (see session.get_ftbl_aeps)
loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)\
bid_l: list of ids (matching the mind varaible set under Fdmg)
#=======================================================================
# OUTPUTS
#=======================================================================
dmg_dx: dxcol of flood damage across all dmg_types and floods
mdex
lvl0: flood aep
lvl1: dmg_type + extra cols
I wanted to have this flexible, so the dfunc could pass up extra headers
couldnt get it to work. instead used a global list and acheck
new headers must be added to the gloabl list and Dfunc.
index
bldg_id
#=======================================================================
# TODO:
#=======================================================================
setup to calc across binvs as well
"""
#=======================================================================
# defaults
#=======================================================================
start = time.time()
logger = self.logger.getChild('calc_fld_set')
if wtf is None: wtf = self.session.write_fdmg_set_dx
if wsl_delta is None: wsl_delta= self.wsl_delta
#=======================================================================
# setup and load the results frame
#=======================================================================
#check to see that all of these conditions pass
if not np.all([bid_l is None, fld_aep_l is None]):
logger.debug('non default run. rebuild the dmg_dx_base')
#non default run. rebuild the frame
self.setup_res_dxcol( fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l)
elif self.dmg_dx_base is None: #probably the first run
if not self.run_cnt == 1: raise IOError
logger.debug('self.dmg_dx_base is None. rebuilding')
self.setup_res_dxcol(fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l) #set it up with the defaults
dmg_dx = self.dmg_dx_base.copy() #just start witha copy of the base
#=======================================================================
# finish defaults
#=======================================================================
'these are all mostly for reporting'
if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all teh keys from the dictionary
""" leaving these as empty kwargs and letting floods handle
if bid_l == None: bid_l = binv_dato.bid_l
if dmg_type_list=='all': dmg_type_list = self.dmg_types """
"""
lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist()
lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()"""
logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta))
logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n')
#=======================================================================
# loop and calc eacch flood
#=======================================================================
fcnt = 0
first = True
for flood_aep in fld_aep_l: #lopo through and build each flood
#self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling
self.state = flood_aep
'useful for keeping track of what the model is doing'
#get teh flood
flood_dato = self.fld_aep_od[flood_aep] #pull thsi from the dictionary
logger.debug('getting dmg_df for %s'%flood_dato.name)
#===================================================================
# run sequence
#===================================================================
#get damage for these depths
dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice
if dmg_df is None: continue #skip this one
#===================================================================
# wrap up
#===================================================================
dmg_dx[flood_aep] = dmg_df #store into the frame
fcnt += 1
logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape)))
#===================================================================
# checking
#===================================================================
if self.db_f:
#check that the floods are increasing
if first:
first = False
last_aep = None
else:
if not flood_aep > last_aep:
raise IOError
last_aep = flood_aep
#=======================================================================
# wrap up
#=======================================================================
self.state = 'na'
if wtf:
filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing
self.dmg_dx = dmg_dx
stop = time.time()
logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l)))
return
def get_results(self): #called by Timestep.run_dt()
self.state='wrap'
logger = self.logger.getChild('get_results')
#=======================================================================
# optionals
#=======================================================================
s = self.session.outpars_d[self.__class__.__name__]
if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum):
logger.debug("calc_summaries \n")
dmgs_df = self.calc_summaries()
self.dmgs_df = dmgs_df.copy()
else: dmgs_df = None
if ('ead_tot' in s) or ('dmg_df' in s):
logger.debug('\n')
self.calc_annulized(dmgs_df = dmgs_df, plot_f = False)
'this will also run calc_sumamries if it hasnt happened yet'
if 'dmg_tot' in s:
#get a cross section of the 'total' column across all flood_aeps and sum for all entries
self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum()
if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s):
logger.debug('get_fld_begrd_cnt')
self.get_fld_begrd_cnt()
if 'fld_pwr_cnt' in s:
logger.debug('calc_fld_pwr_cnt \n')
cnt = 0
for aep, obj in self.fld_aep_od.iteritems():
if obj.gpwr_f: cnt +=1
self.fld_pwr_cnt = cnt
self.binv.calc_binv_stats()
if self.session.write_fdmg_fancy:
self.write_res_fancy()
if self.write_fdmg_sum_fly: #write the results after each run
self.write_dmg_fly()
if self.db_f:
self.check_dmg_dx()
logger.debug('finished \n')
def calc_summaries(self, #annualize the damages
fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in teh summary
dmg_dx=None,
plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr
wtf=None):
"""
basically dropping dimensions on the outputs and adding annuzlied damages
#=======================================================================
# OUTPUTS
#=======================================================================
DROP BINV DIMENSIOn
dmgs_df: df with
columns: raw damage types, and annualized damage types
index: each flood
entries: total damage for binv
DROP FLOODS DIMENSIOn
aad_sum_ser
DROP ALL DIMENSIONS
ead_tot
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_summaries')
if dmg_dx is None: dmg_dx = self.dmg_dx.copy()
if plot is None: plot = self.session._write_figs
if wtf is None: wtf = self.write_fdmg_sum
#=======================================================================
# #setup frame
#=======================================================================
#get the columns
dmg_types = self.dmg_types + ['total']
#=======================================================================
# #build the annualized damage type names
#=======================================================================
admg_types = []
for entry in dmg_types: admg_types.append(entry+'_a')
cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l
self.dmg_df_cols
"""
hp.pd.v(dmg_dx)
"""
dmgs_df = pd.DataFrame(columns = cols)
dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique()
dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True)
#=======================================================================
# loop through and fill out the data
#=======================================================================
for index, row in dmgs_df.iterrows(): #loop through an dfill out
dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep
#sum all the damage types
for dmg_type in dmg_types:
row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up
#calc the probability
row['prob_raw'] = 1/float(row['ari']) #inverse of aep
row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier
#calculate the annualized damages
for admg_type in admg_types:
dmg_type = admg_type[:-2] #drop the a
row[admg_type] = row[dmg_type] * row['prob']
#===================================================================
# get stats from the floodo
#===================================================================
floodo = self.fld_aep_od[row['ari']]
for attn in fsts_l:
row[attn] = getattr(floodo, attn)
#===================================================================
# #add this row backinto the frame
#===================================================================
dmgs_df.loc[index,:] = row
#=======================================================================
# get series totals
#=======================================================================
dmgs_df = dmgs_df.sort_values('prob').reset_index(drop='true')
#=======================================================================
# closeout
#=======================================================================
logger.debug('annualized %i damage types for %i floods'%(len(dmg_type), len(dmgs_df)))
if wtf:
filetail = '%s dmg_sumry'%(self.session.state)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing
logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))
if plot:
self.plot_dmgs(wtf=wtf)
#=======================================================================
# post check
#=======================================================================
if self.db_f:
#check for sort logic
if not dmgs_df.loc[:,'prob'].is_monotonic:
raise IOError
if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
logger.warning('bigger floods arent causing more damage')
'some of the flood tables seem bad...'
#raise IOError
#all probabilities should be larger than zero
if not np.all(dmgs_df.loc[:,'prob'] > 0):
raise IOError
return dmgs_df
def calc_annulized(self, dmgs_df = None,
ltail = None, rtail = None, plot_f=None,
dx = 0.001): #get teh area under the damage curve
"""
#=======================================================================
# INPUTS
#=======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
'none': don't extend the tail
rtail: right trail treatment (high prob low damage)
'none': don't extend
'2year': extend to zero damage at the 2 year aep
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_annulized')
if ltail is None: ltail = self.ca_ltail
if rtail is None: rtail = self.ca_rtail
'plotter ignores passed kwargs here'
if plot_f is None: plot_f= self.session._write_figs
#=======================================================================
# get data
#=======================================================================
if dmgs_df is None:
dmgs_df = self.calc_summaries()
#df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
'only slicing columns for testing'
df = dmgs_df.copy().reset_index(drop=True)
if len(df) == 1:
logger.warning('only got one flood entry. skipping')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape)))
if self.db_f:
if len(df) <2:
logger.error('didnt get enough flood entries to calcluate EAD')
raw_input('press enter to continue any way....')
#=======================================================================
# left tail treatment
#=======================================================================
if ltail == 'flat':
#zero probability
'assume 1000yr flood is the max damage'
max_dmg = df['total'].max()*1.0001
df.loc[-1, 'prob'] = 0
df.loc[-1, 'ari'] = 999999
df.loc[-1, 'total'] = max_dmg
logger.debug('ltail == flat. duplicated danage %.2f at prob 0'%max_dmg)
elif ltail == 'none':
pass
else: raise IOError
'todo: add option for value multiplier'
#=======================================================================
# right tail
#=======================================================================
if rtail == 'none':
pass
elif hp.basic.isnum(rtail):
rtail_yr = float(rtail)
rtail_p = 1.0 / rtail_yr
max_p = df['prob'].max()
#floor check
if rtail_p < max_p:
logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p))
raise IOError
#same
elif rtail_p == max_p:
logger.debug("rtail_p == min(xl. no changes made")
else:
logger.debug("adding zero damage for aep = %.1f"%rtail_yr)
#zero damage
'assume no damage occurs at the passed rtail_yr'
loc = len(df)
df.loc[loc, 'prob'] = rtail_p
df.loc[loc, 'ari'] = 1.0/rtail_p
df.loc[loc, 'total'] = 0
"""
hp.pd.view_web_df(self.data)
"""
else: raise IOError
#=======================================================================
# clean up
#=======================================================================
df = df.sort_index() #resort the index
if self.db_f:
'these should still hold'
if not df.loc[:,'prob'].is_monotonic:
raise IOError
"""see above
if not df['total'].iloc[::-1].is_monotonic:
raise IOError"""
x, y = df['prob'].values.tolist(), df['total'].values.tolist()
#=======================================================================
# find area under curve
#=======================================================================
try:
#ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg')
'this was giving some weird results'
ead_tot = scipy.integrate.trapz(y, x, dx = dx)
except:
logger.warning('scipy.integrate.trapz failed. setting ead_tot to zero')
ead_tot = 0
raise IOError
logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
%(ead_tot, len(y), ltail, rtail))
self.ead_tot = ead_tot
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if pd.isnull(ead_tot):
raise IOError
if not isinstance(ead_tot, float):
raise IOError
if ead_tot <=0:
raise IOError
#=======================================================================
# update data with tails
#=======================================================================
self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)
#=======================================================================
# generate plot
#=======================================================================
if plot_f:
self.plot_dmgs(self, right_nm = None, xaxis = 'prob', logx = False)
return
def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
logger = self.logger.getChild('get_fld_begrd_cnt')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
#lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()
#get all teh basement egrade types
df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by level 2 values
#get occurances by value
d = hp.pd.sum_occurances(df1, logger=logger)
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i bsmt_egrds: %s'%(len(d), d.keys()))
for bsmt_egrd, cnt in d.iteritems():
attn = 'b'+bsmt_egrd +'_cnt'
logger.debug('for \'%s\' got %i'%(attn, cnt))
setattr(self, attn, cnt)
logger.debug('finished \n')
def check_dmg_dx(self): #check logical consistency of the damage results
logger = self.logger.getChild('check_dmg_dx')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
mdex = dmg_dx.columns
aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
aep_l.sort()
#=======================================================================
# check that each flood increases in damage
#=======================================================================
total = None
aep_last = None
for aep in aep_l:
#get this slice
df = dmg_dx[aep]
if total is None:
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
total = df.loc[:,boolcol].sum().sum()
if not aep == min(aep_l):
raise IOError
else:
newtot = df.loc[:,boolcol].sum().sum()
if not newtot >= total:
logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
#raise IOError
#print 'new tot %.2f > oldtot %.2f'%(newtot, total)
total = newtot
aep_last = aep
return
def wrap_up(self):
#=======================================================================
# update asset containers
#=======================================================================
"""
#building inventory
'should be flagged for updating during House.notify()'
if self.binv.upd_kid_f:
self.binv.update()"""
"""dont think we need this here any more.. only on udev.
keeping it just to be save"""
self.last_tstep = copy.copy(self.time)
self.state='close'
def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr
dmg_dx=None,
include_ins = False,
include_raw = False,
include_begh = False):
"""
#=======================================================================
# INPUTS
#=======================================================================
include_ins: whether ot add inputs as tabs.
ive left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('write_res_fancy')
if dmg_dx is None: dmg_dx = self.dmg_dx
if dmg_dx is None:
logger.warning('got no dmg_dx. skipping')
return
#=======================================================================
# setup
#=======================================================================
od = OrderedDict()
#=======================================================================
# add the parameters
#=======================================================================
#get the blank frame
df = pd.DataFrame(columns = ['par','value'] )
df['par'] = list(self.try_inherit_anl)
for indx, row in df.iterrows():
df.iloc[indx, 1] = getattr(self, row['par']) #set this value
od['pars'] = df
#=======================================================================
# try and add damage summary
#=======================================================================
if not self.dmgs_df is None:
od['dmg summary'] = self.dmgs_df
#=======================================================================
# #get theh dmg_dx decomposed
#=======================================================================
od.update(hp.pd.dxcol_to_df_set(dmg_dx, logger=self.logger))
#=======================================================================
# #add dmg_dx as a raw tab
#=======================================================================
if include_raw:
od['raw_res'] = dmg_dx
#=======================================================================
# add inputs
#=======================================================================
if include_ins:
for dataname, dato in self.kids_d.iteritems():
if hasattr(dato, 'data') & hp.pd.isdf(dato.data):
od[dataname] = dato.data
#=======================================================================
# add debuggers
#=======================================================================
if include_begh:
if not self.beg_hist_df is None:
od['beg_hist'] = self.beg_hist_df
#=======================================================================
# #write to excel
#=======================================================================
filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger)
return
def write_dmg_fly(self): #write damage results after each run
logger = self.logger.getChild('write_dmg_fly')
dxcol = self.dmg_dx #results
#=======================================================================
# build the resuults summary series
#=======================================================================
#get all the flood aeps
lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist()
#blank holder
res_ser = pd.Series(index = lvl0vals)
#loop and calc sums for each flood
for aep in lvl0vals:
res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()
#add extras
if not self.ead_tot is None:
res_ser['ead_tot'] = self.ead_tot
res_ser['dt'] = self.tstep_o.year
res_ser['sim'] = self.simu_o.ind
lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)
hp.pd.write_fly_df(self.fly_res_fpath,res_ser, lindex = lindex,
first = self.write_dmg_fly_first, tag = 'fdmg totals',
db_f = self.db_f, logger=logger) #write results on the fly
self.write_dmg_fly_first = False
return
def get_plot_kids(self): #raise kids for plotting the damage summaries
logger = self.logger.getChild('get_plot_kids')
#=======================================================================
# get slice of aad_fmt_df matching the aad cols
#=======================================================================
aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull teh formater pars from the tab
dmgs_df = self.dmgs_df
self.data = dmgs_df
boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formaters with data in the aad
aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice3
"""
hp.pd.view_web_df(self.data)
hp.pd.view_web_df(df)
hp.pd.view_web_df(aad_fmt_df_slice)
aad_fmt_df_slice.columns
"""
#=======================================================================
# formatter kids setup
#=======================================================================
"""need to run this every time so the data is updated
TODO: allow some updating here so we dont have to reduibl deach time
if self.plotter_kids_dict is None:"""
self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp.data.Data_o)
logger.debug('finisehd \n')
def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
ylims = None, #tuple of min/max values for the y-axis
): #plot curve of aad
"""
see tab 'aad_fmt' to control what is plotted and formatting
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dmgs')
if wtf == None: wtf = self.session._write_figs
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if self.dmgs_df is None:
raise IOError
#=======================================================================
# setup
#=======================================================================
if not ylims is None:
try:
ylims = eval(ylims)
except:
pass
#get the plot workers
if self.plotr_d is None:
self.get_plot_kids()
kids_d = self.plotr_d
title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))
logger.debug('with \'%s\''%title)
if not self.tstep_o is None:
title = title + ' for %s'%self.tstep_o.name
#=======================================================================
# update plotters
#=======================================================================
logger.debug('updating plotters with my data')
#get data
data_og = self.data.copy() #store this for later
if self.dmgs_df_wtail is None:
df = self.dmgs_df.copy()
else:
df = self.dmgs_df_wtail.copy()
df = df.sort_values(xaxis, ascending=True)
#reformat data
df.set_index(xaxis, inplace = True)
#re set
self.data = df
#tell kids to refresh their data from here
for gid, obj in kids_d.iteritems(): obj.data = obj.loadr_vir()
self.data = data_og #reset the data
#=======================================================================
# get annotation
#=======================================================================
val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6)
#val_str = "{:,.2f}".format(self.ead_tot)
"""
txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\
'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))"""
txt = 'total EAD = %s'%val_str
#=======================================================================
#plot the workers
#=======================================================================
#twinx
if not right_nm is None:
logger.debug('twinning axis with name \'%s\''%right_nm)
title = title + '_twin'
# sort children into left/right buckets by name to plot on each axis
right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm)
if self.db_f:
if len (right_pdb_d) <1: raise IOError
#=======================================================================
# #send for plotting
#=======================================================================
'this plots both bundles by their data indexes'
ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d,
logx=logx, xlab = xaxis, title=title, annot = txt,
wtf=False)
'cant figure out why teh annot is plotting twice'
ax2.set_ylim(0, 1) #prob limits
legon = False
else:
logger.debug('single axis')
try:
del kids_d['prob']
except:
pass
pdb = self.get_pdb_dict(kids_d.values())
ax1 = self.plot_bundles(pdb,
logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt,
wtf=False)
legon=True
#hatch
#=======================================================================
# post formatting
#=======================================================================
#set axis limits
if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits
elif xaxis == 'prob': ax1.set_xlim(0, .6)
if not ylims is None:
ax1.set_ylim(ylims[0], ylims[1])
#ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits
#=======================================================================
# format y axis labels
#======================================================= ================
old_tick_l = ax1.get_yticks() #get teh old labels
# build the new ticks
l = []
for value in old_tick_l:
new_v = '$' + "{:,.0f}".format(value/1e6)
l.append(new_v)
#apply the new labels
ax1.set_yticklabels(l)
"""
#add thousands comma
ax1.get_yaxis().set_major_formatter(
#matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6)))
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))"""
if xaxis == 'ari':
ax1.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
if wtf:
fig = ax1.figure
savepath_raw = os.path.join(self.outpath,title)
flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon)
if not flag: raise IOError
#plt.close()
return
class Flood(
hp.dyno.Dyno_wrap,
hp.sim.Sim_o,
hp.oop.Parent, #flood object worker
hp.oop.Child):
#===========================================================================
# program pars
#===========================================================================
gpwr_f = False #grid power flag palceholder
#===========================================================================
# user defineid pars
#===========================================================================
ari = None
#loaded from flood table
#area exposure grade. control for areas depth decision algorhithim based on the performance of macro structures (e.g. dykes).
area_egrd00 = ''
area_egrd01 = ''
area_egrd02 = ''
area_egrd00_code = None
area_egrd01_code = None
area_egrd02_code = None
#===========================================================================
# calculated pars
#===========================================================================
hdep_avg = 0 #average house depth
#damate properties
total = 0
BS = 0
BC = 0
MS = 0
MC = 0
dmg_gw = 0
dmg_sw = 0
dmg_df_blank =None
wsl_avg = 0
#===========================================================================
# data containers
#===========================================================================
hdmg_cnt = 0
dmg_df = None
dmg_res_df = None
#bsmt_egrd counters. see get_begrd_cnt()
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self, parent, *vars, **kwargs):
logger = mod_logger.getChild('Flood')
logger.debug('start _init_')
#=======================================================================
# #attach custom vars
#=======================================================================
self.inherit_parent_ans=set(['mind', 'dmg_types'])
#=======================================================================
# initilize cascade
#=======================================================================
super(Flood, self).__init__(parent, *vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# common setup
#=======================================================================
if self.sib_cnt == 0:
#update the resets
pass
#=======================================================================
# unique setup
#=======================================================================
""" handled by the outputr
self.reset_d.update({'hdmg_cnt':0})"""
self.ari = int(self.ari)
self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling
#=======================================================================
# setup functions
#=======================================================================
self.set_gpwr_f()
logger.debug('set_dmg_df_blank()')
self.set_dmg_df_blank()
logger.debug('get your water levels from the selected wsl table \n')
self.set_wsl_frm_tbl()
logger.debug('set_area_egrd()')
self.set_area_egrd()
logger.debug('get_info_from_binv()')
df = self.get_info_from_binv() #initial run to set blank frame
self.set_wsl_from_egrd(df)
""" moved into set_wsl_frm_tbl()
logger.debug('\n')
self.setup_dmg_df()"""
self.init_dyno()
self.logger.debug('__init___ finished \n')
def set_dmg_df_blank(self):
logger = self.logger.getChild('set_dmg_df_blank')
binv_df = self.model.binv.childmeta_df
colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl'])
'wsl should be redundant'
#get boolean
self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv()
#get teh blank frame
self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame
'this still needs the wsl levels attached based on your area exposure grade'
logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape)))
return
def set_gpwr_f(self): #set your power flag
if self.is_frozen('gpwr_f'): return True#shortcut for frozen
logger = self.logger.getChild('set_gpwr_f')
#=======================================================================
# get based on aep
#=======================================================================
min_aep = int(self.model.gpwr_aep)
if self.ari < min_aep: gpwr_f = True
else: gpwr_f = False
logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, gpwr_f))
#update handler
self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f')
return True
def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table
flood_tbl_nm = None, #name of flood table to pull raw data from
#bid_l=None,
):
"""
here we get the raw values
these are later modified by teh area_egrd with self.get_wsl_from_egrd()
#=======================================================================
# INPUTS
#=======================================================================
flood_tbl_df_raw: raw df of the classic flood table
columns:` count, aep, aep, aep, aep....\
real_columns: bldg_id, CPID, depth, depth, depth, etc...
index: unique arbitrary
wsl_ser: series of wsl for this flood on each bldg_id
#=======================================================================
# calls
#=======================================================================
dynp handles Fdmg.flood_tbl_nm
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_frm_tbl')
if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm
#=======================================================================
# get data
#=======================================================================
#pull the raw flood tables
ftbl_o = self.model.ftblos_d[flood_tbl_nm]
wsl_d = ftbl_o.wsl_d
df = pd.DataFrame(index = wsl_d.values()[0].index) #blank frame from teh first entry
#=======================================================================
# loop and apply for each flood type
#=======================================================================
for ftype, df1 in wsl_d.iteritems():
#=======================================================================
# data checks
#=======================================================================
if self.db_f:
if not ftype in ['wet', 'dry', 'damp']:
raise IOError
df_raw =df1.copy()
if not self.ari in df_raw.columns:
logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'%
(self.ari, self.model.ftblos_d[flood_tbl_nm].filepath))
raise IOError
#=======================================================================
# slice for this flood
#=======================================================================
boolcol = df1.columns == self.ari #slice for this aep
#get the series for this
wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float)
#wsl_ser = wsl_ser.rename(ftype) #rename with the aep
'binv slicing moved to Flood_tbl.clean_data()'
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if len(wsl_ser) <1:
raise IOError
""" allowing
#check for nuls
if np.any(pd.isnull(wsl_ser2)):
raise IOError"""
#=======================================================================
# wrap up report and attach
#=======================================================================
df[ftype] = wsl_ser
logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i'
%(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari))
self.wsl_df = df #set this
'notusing dy nps'
if self.session.state == 'init':
self.reset_d['wsl_df'] = df.copy()
return True
def set_area_egrd(self): #pull your area exposure grade from somewhere
"""
#=======================================================================
# calls
#=======================================================================
self.__init__()
dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there
"""
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state=='init':
dep_l = [([self.model], ['set_area_prot_lvl()'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'):
return False
logger = self.logger.getChild('set_area_egrd')
#=======================================================================
# steal egrd from elsewhere table if asked
#=======================================================================
for cnt in range(0,3,1): #loop through each one
attn = 'area_egrd%02d'%cnt
area_egrd_code = getattr(self, attn + '_code')
if area_egrd_code in ['dry', 'damp', 'wet']:
area_egrd = area_egrd_code
#===================================================================
# pull from teh flood table
#===================================================================
elif area_egrd_code == '*ftbl':
ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood tabl object
area_egrd = getattr(ftbl_o, attn) #get from teh table
#===================================================================
# pull from teh model
#===================================================================
elif area_egrd_code == '*model':
area_egrd = getattr(self.model, attn) #get from teh table
else:
logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code))
raise IOError
#===================================================================
# set these
#===================================================================
self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd')
'this should triger generating a new wsl set to teh blank_dmg_df'
logger.debug('set \'%s\' from \'%s\' as \'%s\''
%(attn, area_egrd_code,area_egrd))
if self.db_f:
if not area_egrd in ['dry', 'damp', 'wet']:
raise IOError
return True
def set_wsl_from_egrd(self, df = None): #calculate the wsl based on teh area_egrd
"""
This is a partial results retrival for non damage function results
TODO:
consider checking for depednency on House.area_prot_lvl
#=======================================================================
# calls
#=======================================================================
self.__init__
dynp handles Flood.area_egrd##
"""
#=======================================================================
# check dependencies and frozen
#=========================================================== ============
if not self.session.state=='init':
dep_l = [([self], ['set_area_egrd()', 'set_wsl_frm_tbl()'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_from_egrd')
#if wsl_delta is None: wsl_delta = self.model.wsl_delta
#=======================================================================
# get data
#=======================================================================
if df is None: df = self.get_info_from_binv()
'need to have updated area_prot_lvls'
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not isinstance(df, pd.DataFrame): raise IOError
if not len(df) > 0: raise IOError
#=======================================================================
# add the wsl for each area_egrd
#=======================================================================
for prot_lvl in range(0,3,1): #loop through each one
#get your grade fro this prot_lvl
attn = 'area_egrd%02d'%prot_lvl
area_egrd = getattr(self, attn)
#identify the housese for this protection level
boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl
if boolidx.sum() == 0: continue
#give them the wsl corresponding to this grade
df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd]
#set a tag for the area_egrd
if 'area_egrd' in df.columns:
df.loc[boolidx, 'area_egrd'] = area_egrd
logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd))
#=======================================================================
# set this
#=======================================================================
self.dmg_df_blank = df
#=======================================================================
# post check
#=======================================================================
logger.debug('set dmg_df_blank with %s'%str(df.shape))
if self.session.state=='init':
self.reset_d['dmg_df_blank'] = df.copy()
if self.db_f:
if np.any(pd.isnull(df['wsl'])):
logger.error('got some wsl nulls')
raise IOError
return True
"""
hp.pd.v(df)
hp.pd.v(self.dmg_df_blank)
"""
def run_fld(self, **kwargs): #shortcut to collect all the functions for a simulation ru n
self.run_cnt += 1
dmg_df_blank = self.get_info_from_binv()
dmg_df = self.get_dmg_set(dmg_df_blank, **kwargs)
if self.db_f: self.check_dmg_df(dmg_df)
'leaving this here for simplicity'
self.calc_statres_flood(dmg_df)
return dmg_df
def get_info_from_binv(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_info_from_binv')
binv_df = self.model.binv.childmeta_df
#pull static values
binvboolcol = self.binvboolcol
df = self.dmg_df_blank.copy()
'this should have wsl added to it from set_wsl_from_egrd()'
if self.db_f:
if not len(binvboolcol) == len(binv_df.columns):
logger.warning('got length mismatch between binvboolcol (%i) and the binv_df columns (%i)'%
(len(binvboolcol), len(binv_df.columns)))
'pandas will handle this mistmatch.. just ignores the end'
#=======================================================================
# #update with values from teh binv
#=======================================================================
df.update(binv_df.loc[:,binvboolcol], overwrite=True) #update from all the values in teh binv
logger.debug('retreived %i values from the binv_df on: %s'
%(binv_df.loc[:,binvboolcol].count().count(), binv_df.loc[:,binvboolcol].columns.tolist()))
#=======================================================================
# macro calcs
#=======================================================================
if 'hse_depth' in df.columns:
df['hse_depth'] = df['wsl'] - df['anchor_el']
#groudn water damage flag
if 'gw_f' in df.columns:
df.loc[:,'gw_f'] = df['dem_el'] > df['wsl'] #water is below grade
if self.db_f:
if 'bsmt_egrd' in binv_df.columns:
raise IOError
return df
def get_dmg_set(self, #calcluate the damage for each house
dmg_df, #empty frame for filling with damage results
#dmg_type_list='all',
#bid_l = None,
#wsl_delta = None,
dmg_rat_f =None, #includt eh damage ratio in results
):
"""
#=======================================================================
# INPUTS
#=======================================================================
depth_ser: series of depths (for this flood) with index = bldg_id
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmg_set(%s)'%self.get_id())
if dmg_rat_f is None: dmg_rat_f = self.model.dmg_rat_f
hse_od = self.model.binv.hse_od #ordred dictionary by bid: hse_dato
""" see get_wsl_from_egrd()
#=======================================================================
# build the dmg_df
#=======================================================================
bid_ar = self.model.binv.data.loc[:,self.mind].values.astype(np.int) #get everything from teh binv
dmg_df = pd.DataFrame(index = bid_ar, columns = self.model.dmg_df_cols)"""
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
if not isinstance(dmg_df, pd.DataFrame):
raise IOError
boolidx = dmg_df.index.isin(hse_od.keys())
if not np.all(boolidx):
logger.error('some of the bldg_ids in the wsl_ser were not found in the binv: \n %s'
%dmg_df.index[~boolidx])
raise IOError
#check the damage columns are empty
boolcol = np.isin(dmg_df.columns, ['MS', 'MC', 'BS', 'BC', 'GS', 'total']) #identify damage columns
if not np.all(pd.isnull(dmg_df.loc[:,boolcol])):
raise IOError
#=======================================================================
# frame setup
#=======================================================================
#identify columns containing damage results
dmgbool = np.logical_or(dmg_df.columns.isin(self.model.dmg_types), #damages
pd.Series(dmg_df.columns).str.contains('_rat').values) #damage ratios
#=======================================================================
# get teh damage for each house
#=======================================================================
logger.debug('getting damage for %s entries'%(str(dmg_df.shape)))
"""
to improve performance, we're only looping through those entries with real flood deths (skin_df)
however, the full results frame is still used (non_real entries should equal zero)
"""
"""generally no memory added during these
self.session.prof(state='%s.get_dmg_set.loop'%(self.name)) #memory profiling"""
cnt = 0
first = True
for index, row in dmg_df.iterrows(): #loop through each row
#===================================================================
# pre-printouts
#===================================================================
#self.session.prof(state='%s.get_dmg_set.%i'%(self.name, cnt)) #memory profiling
cnt +=1
if cnt%self.session._logstep == 0: logger.info(' (%i/%i)'%(cnt, len(dmg_df)))
#===================================================================
# retrive info
#===================================================================
hse_obj = hse_od[index] #get this house object by bldg_id
hse_obj.floodo = self #let the house know who is flooding it
logger.debug('on hse \'%s\' \n'%hse_obj.name)
#===================================================================
# add damage results
#===================================================================
if row['hse_depth'] < self.model.hse_skip_depth:
logger.debug('depth below hse_obj.vuln_el for bldg_id: %i. setting fdmg=0'%index)
row[dmgbool] = 0.0 #set all damage to zero
#depth significant. calc it
else:
#runt he house
logger.debug('running house \n')
dmg_ser = hse_obj.run_hse(row['wsl'], dmg_rat_f = dmg_rat_f)
row.update(dmg_ser) #add all these entries
#===================================================================
# extract extra attributers from teh house
#===================================================================
#find the entries to skip attribute in filling
if first:
boolar1 = ~np.isin(row.index, ['total'])
boolar2 = pd.isnull(row)
boolar = np.logical_and(boolar1, boolar2)
first = False
#fill thtese
for attn, v in row[boolar].iteritems():
row[attn] = getattr(hse_obj, attn)
#===================================================================
# wrap up
#===================================================================
dmg_df.loc[index,:] = row #store this row back into the full resulst frame
#=======================================================================
# macro stats
#=======================================================================
#total
boolcol = dmg_df.columns.isin(self.model.dmg_types)
dmg_df['total'] = dmg_df.iloc[:,boolcol].sum(axis = 1) #get the sum
#=======================================================================
# closeout and reporting
#=======================================================================
#print out summaries
if not self.db_f:
logger.info('finished for %i houses'%(len(dmg_df.index)))
else:
totdmg = dmg_df['total'].sum()
totdmg_str = '$' + "{:,.2f}".format(totdmg)
logger.info('got totdmg = %s for %i houses'%(totdmg_str,len(dmg_df.index)))
if np.any(pd.isnull(dmg_df)):
raise IOError
for dmg_type in self.model.dmg_types:
dmg_tot = dmg_df[dmg_type].sum()
dmg_tot_str = '$' + "{:,.2f}".format(dmg_tot)
logger.debug('for dmg_type \'%s\' dmg_tot = %s'%(dmg_type, dmg_tot_str))
return dmg_df
def check_dmg_df(self, df):
logger = self.logger.getChild('check_dmg_df')
#=======================================================================
# check totals
#=======================================================================
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
if not round(df['total'].sum(),2) == round(df.loc[:, boolcol].sum().sum(), 2):
logger.error('total sum did not match sum from damages')
raise IOError
def calc_statres_flood(self, df): #calculate your statistics
'running this always'
logger = self.logger.getChild('calc_statres_flood')
s = self.session.outpars_d[self.__class__.__name__]
"""needed?
self.outpath = os.path.join(self.model.outpath, self.name)"""
#=======================================================================
# total damage
#=======================================================================
for dmg_code in self.model.dmg_types + ['total']:
#loop through and see if the user asked for this output
'e.g. MC, MS, BC, BS, total'
if dmg_code in s:
v = df[dmg_code].sum()
setattr(self, dmg_code, v)
logger.debug('set \'%s\' to %.2f'%(dmg_code, v))
#=======================================================================
# by flood type
#=======================================================================
if 'dmg_sw' in s:
self.dmg_sw = df.loc[~df['gw_f'], 'total'].sum() #sum all those with surface water
if 'dmg_gw' in s:
self.dmg_gw = df.loc[df['gw_f'], 'total'].sum() #sum all those with surface water
#=======================================================================
# number of houses with damage
#=======================================================================
if 'hdmg_cnt' in s:
boolidx = df.loc[:, 'total'] > 0
self.hdmg_cnt = boolidx.sum()
#=======================================================================
# average house depth
#=======================================================================
if 'hdep_avg' in s:
self.hdep_avg = np.mean(df.loc[:,'hse_depth'])
#=======================================================================
# wsl average
#=======================================================================
if 'wsl_avg' in s:
self.wsl_avg = np.mean(df.loc[:,'wsl'])
#=======================================================================
# basement exposure grade counts
#=======================================================================
'just calcing all if any of them are requested'
boolar = np.isin(np.array(['bwet_cnt', 'bdamp_cnt', 'bdry_cnt']),
np.array(s))
if np.any(boolar): self.get_begrd_cnt()
#=======================================================================
# plots
#=======================================================================
if 'dmg_res_df' in s:
self.dmg_res_df = df
"""
hp.pd.v(df)
"""
return
def get_begrd_cnt(self):
logger = self.logger.getChild('get_begrd_cnt')
df = self.dmg_res_df
#=======================================================================
# #get egrades
# try:
# ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
# except:
# df.columns.values.tolist()
# raise IOError
#=======================================================================
ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
begrd_l = ser.unique().tolist()
logger.debug('looping through %i bsmt_egrds: %s'%(len(begrd_l), begrd_l))
for bsmt_egrd in begrd_l:
att_n = 'b'+bsmt_egrd+'_cnt'
#count the number of occurances
boolar = ser == bsmt_egrd
setattr(self, att_n, int(boolar.sum()))
logger.debug('setting \'%s\' = %i'%(att_n, boolar.sum()))
logger.debug('finished \n')
return
def plot_dmg_pie(self, dmg_sum_ser_raw = None,
exp_str = 1, title = None, wtf=None): #generate a pie chart for the damage
"""
#=======================================================================
# INPUTS
#=======================================================================
dmg_sum_ser: series of damage values (see calc_summary_ser)
index: dmg_types
values: fdmg totals for each type for this flood
exp_main: amoutn to explote structural damage values by
"""
#=======================================================================
# set defaults
#=======================================================================
logger = self.logger.getChild('plot_dmg_pie')
if title == None: title = self.session.tag + ' '+self.name+' ' + 'dmgpie_plot'
if wtf is None: wtf = self.session._write_figs
if dmg_sum_ser_raw == None: #just calculate
dmg_sum_ser_raw = self.dmg_res_df[self.dmg_types].sum()
#dmg_sum_ser_raw = self.calc_summary_ser()
logger.debug('with dmg_sum_ser_raw: \n %s'%dmg_sum_ser_raw)
#=======================================================================
# data cleaning
#=======================================================================
#drop na
dmg_sum_ser1 = dmg_sum_ser_raw.dropna()
#drop zero values
boolidx = dmg_sum_ser1 == 0
dmg_sum_ser2 = dmg_sum_ser1[~boolidx]
if np.all(boolidx):
logger.warning('got zero damages. not pie plot generated')
return
if boolidx.sum() > 0:
logger.warning('dmg_pie dropped %s zero totals'%dmg_sum_ser1.index[boolidx].tolist())
dmg_sum_ser = dmg_sum_ser2
#=======================================================================
# get data
#=======================================================================
#shortcuts
dmg_types = dmg_sum_ser.index.tolist()
labels = dmg_types
sizes = dmg_sum_ser.values.tolist()
#=======================================================================
# #get properties list from the dfunc tab
#=======================================================================
colors = []
explode_list = []
wed_lab_list = []
dfunc_df = self.session.pars_df_d['dfunc']
for dmg_type in dmg_types:
boolidx = dfunc_df['dmg_type'] == dmg_type #id this dmg_type
#color
color = dfunc_df.loc[boolidx,'color'].values[0]
colors.append(color) #add to the list
#explode
explode = dfunc_df.loc[boolidx,'explode'].values[0]
explode_list.append(explode) #add to the list
#wedge_lable
wed_lab = '$' + "{:,.2f}".format(dmg_sum_ser[dmg_type])
wed_lab_list.append(wed_lab)
plt.close()
fig, ax = plt.subplots()
wedges = ax.pie(sizes, explode=explode_list, labels=labels, colors = colors,
autopct=hp.plot.autopct_dollars(sizes),
shadow=True, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
ax.set_title(title)
if wtf: #write to file
filetail = self.session.name + ' '+self.name+' ' + 'dmgpie_plot'
filename = os.path.join(self.model.outpath, filetail)
hp.plot.save_fig(self, fig, savepath_raw = filename)
return ax
def plot_dmg_scatter(self, #scatter plot of damage for each house
dmg_df_raw=None, yvar = 'hse_depth', xvar = 'total', plot_zeros=True,
title=None, wtf=None, ax=None,
linewidth = 0, markersize = 3, marker = 'x',
**kwargs):
"""
for complex figures, axes should be passed and returned
#=======================================================================
# INPUTS
#=======================================================================
should really leave this for post processing
plot_zeros: flag to indicate whether entries with x value = 0 should be included
#=======================================================================
# TODO
#=======================================================================
redo this with the plot worker
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dmg_scatter')
if title == None: title = self.session.tag + ' '+self.name + ' dmg_scatter_plot'
if wtf is None: wtf = self.session._write_figs
if dmg_df_raw == None:
dmg_res_df_raw = self.dmg_res_df #just use the attached one
if not hp.pd.isdf(dmg_res_df_raw): raise IOError
#=======================================================================
# manipulate data for plotting
#=======================================================================
if plot_zeros:
dmg_df = dmg_res_df_raw
else:
#exclude those entries with zero value on the xvar
boolidx = dmg_res_df_raw[xvar] == 0
dmg_df = dmg_res_df_raw[~boolidx]
self.logger.warning('%s values = zero (%i) excluded from plot'%(xvar, boolidx.sum()))
#=======================================================================
# setup data plot
#=======================================================================
x_ar = dmg_df[xvar].values.tolist() #damage
xlab = 'damage($)'
'could make this more dynamic'
if sum(x_ar) <=0:
logger.warning('got no damage. no plot generated')
return
y_ar = dmg_df[yvar].values.tolist() #depth
#=======================================================================
# SEtup defaults
#=======================================================================
if ax == None:
plt.close('all')
fig = plt.figure(2)
fig.set_size_inches(9, 6)
ax = fig.add_subplot(111)
ax.set_title(title)
ax.set_ylabel(yvar + '(m)')
ax.set_xlabel(xlab)
#set limits
#ax.set_xlim(min(x_ar), max(x_ar))
#ax.set_ylim(min(y_ar), max(y_ar))
else:
fig = ax.figure
label = self.name + ' ' + xvar
#=======================================================================
# send teh data for plotting
#=======================================================================
pline = ax.plot(x_ar,y_ar,
label = label,
linewidth = linewidth, markersize = markersize, marker = marker,
**kwargs)
#=======================================================================
# post formatting
#=======================================================================
ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
"""
plt.show()
"""
if wtf: #trigger for saving the fiture
filetail = title
filename = os.path.join(self.model.outpath, filetail)
hp.plot.save_fig(self, fig, savepath_raw = filename, logger=logger)
return pline
class Binv( #class object for a building inventory
hp.data.Data_wrapper,
hp.plot.Plot_o,
hp.sim.Sim_o,
hp.oop.Parent,
hp.oop.Child):
#===========================================================================
# program pars
#===========================================================================
# legacy index numbers
legacy_ind_d = {0:'ID',1:'address',2:'CPID',10:'class', 11:'struct_type', 13:'gis_area', \
18:'bsmt_f', 19:'ff_height', 20:'xcoord',21:'ycoord', 25:'dem_el'}
#column index where the legacy binv transitions to teh new binv
legacy_break_ind = 26
#column names expected in the cleaned binv
exepcted_coln = ['gis_area', 'bsmt_f', 'ff_height',\
'dem_el', 'value', 'ayoc', 'B_f_height',\
'bkflowv_f','sumpump_f', 'genorat_f', 'hse_type', \
'name', 'anchor_el', 'parcel_area']
hse_type_list = ['AA', 'AD', 'BA', 'BC', 'BD', 'CA', 'CC', 'CD'] #classification of building types
#===========================================================================
# user provided
#===========================================================================
legacy_binv_f = True
#===========================================================================
# calculated pars
#===========================================================================
#===========================================================================
# data holders
#===========================================================================
#cnt = 0
hnew_cnt = 0
hAD_cnt = 0
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Binv')
logger.debug('start _init_')
"""Im explicitly attaching the child datobuilder here
dont want to change the syntax of the binv
inspect.isclass(self.kid_class)
"""
self.inherit_parent_ans=set(['mind', 'legacy_binv_f', 'gis_area_max'])
super(Binv, self).__init__(*vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# special inheritance
#=======================================================================
#self.model = self.parent
self.kid_class = House
self.reset_d.update({'hnew_cnt':0, 'hAD_cnt':0})
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if not self.kid_class == House:
raise IOError
if not isinstance(self.reset_d, dict):
raise IOError
if self.model is None:
raise IOError
if not self.model.name == self.parent.name:
raise IOError
#=======================================================================
# special inits
#=======================================================================
self.exepcted_coln = set(self.exepcted_coln + [self.mind]) #expect the mind in the column names as well
self.load_data()
logger.debug('finiished _init_ \n')
return
def load_data(self): #custom data loader
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_data')
#test pars
if self.session._parlo_f:
test_trim_row = self.test_trim_row
else: test_trim_row = None
#=======================================================================
# load the file
#=======================================================================
self.filepath = self.get_filepath()
logger.debug('from filepath: %s'%self.filepath)
#load from file
df_raw = hp.pd.load_xls_df(self.filepath, logger=logger, test_trim_row = test_trim_row,
header = 0, index_col = None)
#=======================================================================
# send for cleaning
#=======================================================================
df1 = hp.pd.clean_datapars(df_raw, logger = logger)
"""
hp.pd.v(df3)
"""
#=======================================================================
# clean per the leagacy binv
#=======================================================================
if self.legacy_binv_f:
df2 = self.legacy_clean_df(df1)
else:
df2 = df1
#=======================================================================
# standard clean
#=======================================================================
df3 = self.clean_inv_df(df2)
#=======================================================================
# macro data manipulations
#=======================================================================
#add names column
if not 'name' in df3.columns:
df3['name'] = 'h' + df3.loc[:, self.mind].astype(np.string_) #format as strings
#add anchor el
if not 'anchor_el' in df3.columns:
df3['anchor_el'] = df3['dem_el'] + df3['ff_height']
df3['anchor_el'] = df3['anchor_el'].astype(np.float)
#=======================================================================
# checking
#=======================================================================
if self.db_f: self.check_binv_df(df3)
#=======================================================================
# wrap up
#=======================================================================
self.childmeta_df = df3.copy()
#shortcut lists
self.bid_l = df3[self.mind].astype(np.int).values.tolist()
self.hse_types_l = df3['hse_type'].unique().tolist()
logger.info('attached binv_df with %s'%str(df3.shape))
return
"""
hp.pd.v(df3)
"""
def legacy_clean_df(self, df_raw): #compile data from legacy (rfda) inventory syntax
"""
pulling column headers from the dictionary of location keys
creating some new headers as combinations of this
"""
#=======================================================================
# setup
#=======================================================================
logger = self.logger.getChild('legacy_clean_df')
d = self.legacy_ind_d
#=======================================================================
# split the df into legacy and non
#=======================================================================
df_leg_raw = df_raw.iloc[:,0:self.legacy_break_ind]
df_new = df_raw.iloc[:,self.legacy_break_ind+1:]
#=======================================================================
# clean the legacy frame
#=======================================================================
#change all the column names
df_leg1 = df_leg_raw.copy()
""" couldnt get this to work
df_leg1.rename(mapper=d, index = 'column')"""
for colind, coln in enumerate(df_leg_raw.columns):
if not colind in d.keys():continue
df_leg1.rename(columns = {coln:d[colind]}, inplace=True)
logger.debug('renamed \'%s\' to \'%s\''%(coln,d[colind] ))
#trim down to these useful columns
boolcol = df_leg1.columns.isin(d.values()) #identify columns in the translation dictionary
df_leg2 = df_leg1.loc[:,boolcol]
logger.debug('trimmed legacy binv from %i to %i cols'%(len(df_leg_raw.columns), boolcol.sum()))
#=======================================================================
# add back the new frame
#=======================================================================
df_merge = df_leg2.join(df_new)
#=======================================================================
# house t ype
#=======================================================================
df_merge.loc[:,'hse_type'] = df_leg2.loc[:,'class'] + df_leg2.loc[:,'struct_type']
logger.debug('cleaned the binv from %s to %s'%(str(df_raw.shape), str(df_merge.shape)))
if self.db_f:
if not len(df_merge) == len(df_raw):
raise IOError
if np.any( | pd.isnull(df_merge['hse_type']) | pandas.isnull |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Model Serving
# MAGIC
# MAGIC There are many deployment options for machine learning models. This notebook explores a more complex deployment scenario involving the real time deployment of a convolutional neural network using REST and Databricks MLflow Model Serving.
# MAGIC
# MAGIC ##  In this lesson you:<br>
# MAGIC - Create a **`pyfunc`** to serve a **`keras`** model with pre and post processing logic
# MAGIC - Save the **`pyfunc`** for downstream consumption
# MAGIC - Serve the model using a REST endpoint
# MAGIC
# MAGIC **NOTE:** *You need <a href="https://docs.databricks.com/applications/mlflow/model-serving.html#requirements" target="_blank">cluster creation</a> permissions to create a model serving endpoint. The instructor will either demo this notebook or enable cluster creation permission for the students from the Admin console.*
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# MAGIC %md ## Model Serving in Databricks
# MAGIC
# MAGIC The MLflow model registry in Databricks is now integrated with MLflow Model Serving. This is currently intended for development use cases and is therefore not intended for production. In this module, you will create a wrapper class around a **`keras`** model that provides custom pre and post processing logic necessary for this more complex deployment scenario.
# MAGIC
# MAGIC For additional background, see the following resources:
# MAGIC
# MAGIC - <a href="https://databricks.com/blog/2020/06/25/announcing-mlflow-model-serving-on-databricks.html" target="_blank">Databricks blog on model serving</a>
# MAGIC - <a href="https://github.com/mlflow/mlflow/tree/master/examples/flower_classifier" target="_blank">Example of an image classifier</a>
# MAGIC - <a href="https://www.mlflow.org/docs/latest/models.html#example-saving-an-xgboost-model-in-mlflow-format" target="_blank">Example of a custom loader used with XGBoost</a>
# COMMAND ----------
# MAGIC %md ## Creating a Wrapper Class using **`pyfunc`**
# COMMAND ----------
# MAGIC %md Create a **`keras`** model using a reference architecture and pretrained weights.
# COMMAND ----------
import tensorflow as tf
tf.random.set_seed(42)
model = tf.keras.applications.VGG16(weights="imagenet")
model.summary()
# COMMAND ----------
# MAGIC %md Create a small dataset to test the model. This is two images of cats.
# COMMAND ----------
import pandas as pd
import base64
filenames = [f"{datasets_dir}/dl/img/cats/cats2.jpg".replace("dbfs:/", "/dbfs/"),
f"{datasets_dir}/dl/img/cats/cats4.jpg".replace("dbfs:/", "/dbfs/")]
def read_image(path: str) -> bytes:
"""Reads an image from a path and returns the contents in bytes"""
with open(path, "rb") as f:
image_bytes = f.read()
return image_bytes
data = pd.DataFrame(data=[base64.encodebytes(read_image(x)) for x in filenames], columns=["image"])
data
# COMMAND ----------
# MAGIC %md Save the model using **`mlflow`**.
# COMMAND ----------
import mlflow
import mlflow.keras
import uuid
model_name = f"keras_model_{uuid.uuid4().hex[:6]}"
with mlflow.start_run() as run:
mlflow.keras.log_model(artifact_path=model_name, keras_model=model)
model_uri = f"runs:/{run.info.run_id}/{model_name}"
print(f"Model saved to {model_uri}")
# COMMAND ----------
# MAGIC %md Create a wrapper class that includes the following as a **`pyfunc`**:
# MAGIC
# MAGIC - A **`load_context`** method to load in the model.
# MAGIC - Custom featurization logic that parses base64 encoded images (necessary for HTTP requests)
# MAGIC - Custom prediction logic that reports the top class and its probability
# COMMAND ----------
import mlflow
class KerasImageClassifierPyfunc(mlflow.pyfunc.PythonModel):
def __init__(self):
self.model = None
self.img_height = 224
self.img_width = 224
def load_context(self, context=None, path=None):
"""
When loading a pyfunc, this method runs automatically with the related
context. This method is designed to load the keras model from a path
if it is running in a notebook or use the artifact from the context
if it is loaded with mlflow.pyfunc.load_model()
"""
import numpy as np
import tensorflow as tf
if context: # This block executes for server run
model_path = context.artifacts["keras_model"]
else: # This block executes for notebook run
model_path = path
self.model = mlflow.keras.load_model(model_path)
def predict_from_bytes(self, image_bytes):
"""
Applied across numpy representations of the model input, this method
uses the appropriate decoding based upon whether it is run in the
notebook or on a server
"""
import base64
try: # This block executes for notebook run
image_bytes_decoded = base64.decodebytes(image_bytes)
img_array = tf.image.decode_image(image_bytes_decoded)
except: # This block executes for server run
img_array = tf.image.decode_image(image_bytes)
img_array = tf.image.resize(img_array, (self.img_height, self.img_width))
img_array = tf.expand_dims(img_array, 0)
prediction = self.model.predict(img_array)
return prediction[0]
def postprocess_raw_predictions(self, raw_prediction):
"""
Post processing logic to render predictions in a human readable form
"""
from tensorflow.keras.applications.vgg16 import decode_predictions
res = decode_predictions(raw_prediction, top=3)
str_template = "Best response of {best} with probability of {p}"
return [str_template.format(best=i[0][1], p=i[0][2]) for i in res]
def predict(self, context=None, model_input=None):
"""
Wrapper predict method
"""
n_records = model_input.shape[0]
input_numpy = model_input.values
raw_predictions = np.vectorize(self.predict_from_bytes, otypes=[np.ndarray])(input_numpy)
raw_predictions = np.array(raw_predictions.tolist()).reshape([n_records, 1000])
decoded_predictions = self.postprocess_raw_predictions(raw_predictions)
decoded_predictions = | pd.DataFrame(decoded_predictions, columns=["prediction"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["cns_cov_dat"]):
assert row["cns_group"]
else:
assert not row["cns_group"]
def test_resp_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF AST_GROUP <> NULL | Select | Next
if row["ast_group"]:
assert row["resp_group"]
continue
# IF RESP_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["resp_cov_dat"]):
assert row["resp_group"]
else:
assert not row["resp_group"]
def test_bmi_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_OBESITY_DAT > BMI_DAT | Select | Next
if gt(row["sev_obesity_dat"], row["bmi_dat"]):
assert row["bmi_group"]
continue
# IF BMI_VAL >=40 | Select | Reject
if gte(row["bmi_val"], 40):
assert row["bmi_group"]
else:
assert not row["bmi_group"]
def test_diab_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF DIAB_DAT > DMRES_DAT | Select | Reject
if gt(row["diab_dat"], row["dmres_dat"]):
assert row["diab_group"]
else:
assert not row["diab_group"]
def test_sevment_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_MENTAL_DAT > SMHRES_DAT | Select | Reject
if gt(row["sev_mental_dat"], row["smhres_dat"]):
assert row["sevment_group"]
else:
assert not row["sevment_group"]
def test_atrisk_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMUNOGROUP <> NULL | Select | Next
if row["immuno_group"]:
assert row["atrisk_group"]
continue
# IF CKD_GROUP <> NULL | Select | Next
if row["ckd_group"]:
assert row["atrisk_group"]
continue
# IF RESP_GROUP <> NULL | Select | Next
if row["resp_group"]:
assert row["atrisk_group"]
continue
# IF DIAB_GROUP <> NULL | Select | Next
if row["diab_group"]:
assert row["atrisk_group"]
continue
# IF CLD_DAT <>NULL | Select | Next
if pd.notnull(row["cld_dat"]):
assert row["atrisk_group"]
continue
# IF CNS_GROUP <> NULL | Select | Next
if row["cns_group"]:
assert row["atrisk_group"]
continue
# IF CHD_COV_DAT <> NULL | Select | Next
if pd.notnull(row["chd_cov_dat"]):
assert row["atrisk_group"]
continue
# IF SPLN_COV_DAT <> NULL | Select | Next
if pd.notnull(row["spln_cov_dat"]):
assert row["atrisk_group"]
continue
# IF LEARNDIS_DAT <> NULL | Select | Next
if pd.notnull(row["learndis_dat"]):
assert row["atrisk_group"]
continue
# IF SEVMENT_GROUP <> NULL | Select | Reject
if row["sevment_group"]:
assert row["atrisk_group"]
else:
assert not row["atrisk_group"]
def test_covax1d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVRX1_DAT <> NULL | Select | Next
if pd.notnull(row["covrx1_dat"]):
assert row["covax1d_group"]
continue
# IF COVADM1_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm1_dat"]):
assert row["covax1d_group"]
else:
assert not row["covax1d_group"]
def test_covax2d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["covax2d_group"]
continue
# IF COVRX2_DAT <> NULL | Select | Next
if pd.notnull(row["covrx2_dat"]):
assert row["covax2d_group"]
continue
# IF COVADM2_DAT <> NULL | Select | Reject
if | pd.notnull(row["covadm2_dat"]) | pandas.notnull |
# this is written to retrive airnow data concatenate and add to pandas array
# for usage
from builtins import object, str
from datetime import datetime
import pandas as pd
try:
from joblib import Parallel, delayed
has_joblib = True
except:
has_joblib = False
def dateparse(x):
return datetime.strptime(x, '%d:%m:%Y %H:%M:%S')
def add_local(fname, dates=None, latlonbox=None, freq=None, calc_550=False):
a = AERONET()
a.url = fname
df = a.read_aeronet(fname)
if freq is not None:
df = sdf.groupby('siteid').resample(freq).mean().reset_index()
return df
def add_data(dates=None,
product='AOD15',
latlonbox=None,
daily=False,
calc_550=True,
inv_type=None,
freq=None,
siteid=None,
detect_dust=False, n_procs=1, verbose=10):
a = AERONET()
if has_joblib and (n_procs > 1):
min_date = dates.min()
max_date = dates.max()
# find days from here to there
days = pd.date_range(start=min_date, end=max_date, freq='D')
days1 = pd.date_range(start=min_date, end=max_date, freq='D') + pd.Timedelta(1, unit='D')
vars = dict(product=product, latlonbox=latlonbox, daily=daily, calc_550=calc_550, inv_type=inv_type, siteid=siteid, freq=None, detect_dust=detect_dust)
dfs = Parallel(n_jobs=n_procs, verbose=verbose)(delayed(_parallel_aeronet_call)(pd.DatetimeIndex([d1, d2]), **vars) for d1, d2 in zip(days, days1))
df = pd.concat(dfs, ignore_index=True).drop_duplicates()
if freq is not None:
df.index = df.time
df = df.groupby('siteid').resample(freq).mean().reset_index()
return df.reset_index(drop=True)
else:
if ~has_joblib and (n_procs > 1):
print('Please install joblib to use the parallel feature of monetio.aeronet. Proceeding in serial mode...')
df = a.add_data(dates=dates,
product=product,
latlonbox=latlonbox,
daily=daily,
calc_550=calc_550,
inv_type=inv_type,
siteid=siteid,
freq=freq,
detect_dust=detect_dust)
return df.reset_index(drop=True)
def _parallel_aeronet_call(dates=None,
product='AOD15',
latlonbox=None,
daily=False,
calc_550=True,
inv_type=None,
freq=None,
siteid=None,
detect_dust=False):
a = AERONET()
df = a.add_data(dates, product=product, latlonbox=latlonbox,
daily=daily,
calc_550=calc_550,
inv_type=inv_type,
siteid=siteid,
freq=freq,
detect_dust=detect_dust)
return df
class AERONET(object):
def __init__(self):
from numpy import concatenate, arange
self.baseurl = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?'
self.dates = [
datetime.strptime('2016-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2016-06-10 13:00:00', '%Y-%m-%d %H:%M:%S')
]
self.datestr = []
self.df = pd.DataFrame()
self.daily = None
self.prod = None
self.inv_type = None
self.siteid = None
self.objtype = 'AERONET'
self.usecols = concatenate((arange(30), arange(65, 83)))
# [21.1,-131.6686,53.04,-58.775] #[latmin,lonmin,latmax,lonmax]
self.latlonbox = None
self.url = None
def build_url(self):
sy = self.dates.min().strftime('%Y')
sm = self.dates.min().strftime('%m').zfill(2)
sd = self.dates.min().strftime('%d').zfill(2)
sh = self.dates.min().strftime('%H').zfill(2)
ey = self.dates.max().strftime('%Y').zfill(2)
em = self.dates.max().strftime('%m').zfill(2)
ed = self.dates.max().strftime('%d').zfill(2)
eh = self.dates.max().strftime('%H').zfill(2)
if self.prod in [
'AOD10', 'AOD15', 'AOD20', 'SDA10', 'SDA15', 'SDA20', 'TOT10',
'TOT15', 'TOT20'
]:
base_url = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?'
inv_type = None
else:
base_url = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_inv_v3?'
if self.inv_type == 'ALM15':
inv_type = '&ALM15=1'
else:
inv_type = '&AML20=1'
date_portion = 'year=' + sy + '&month=' + sm + '&day=' + sd + \
'&hour=' + sh + '&year2=' + ey + '&month2=' + em + '&day2=' + ed +\
'&hour2=' + eh
# print(self.prod, inv_type)
if self.inv_type is not None:
product = '&product=' + self.prod
else:
product = '&' + self.prod + '=1'
self.inv_type = ''
time = '&AVG=' + str(self.daily)
if self.siteid is not None:
latlonbox = '&site={}'.format(self.siteid)
elif self.latlonbox is None:
latlonbox = ''
else:
lat1 = str(float(self.latlonbox[0]))
lon1 = str(float(self.latlonbox[1]))
lat2 = str(float(self.latlonbox[2]))
lon2 = str(float(self.latlonbox[3]))
latlonbox = '&lat1=' + lat1 + '&lat2=' + \
lat2 + '&lon1=' + lon1 + '&lon2=' + lon2
# print(base_url)
# print(date_portion)
# print(product)
# print(inv_type)
# print(time)
# print(latlonbox)
if inv_type is None:
inv_type = ''
self.url = base_url + date_portion + product + \
inv_type + time + latlonbox + '&if_no_html=1'
def read_aeronet(self):
print('Reading Aeronet Data...')
# header = self.get_columns()
df = pd.read_csv(self.url,
engine='python',
header=None,
skiprows=6,
parse_dates={'time': [1, 2]},
date_parser=dateparse,
na_values=-999)
# df.rename(columns={'date_time': 'time'}, inplace=True)
columns = self.get_columns()
df.columns = columns # self.get_columns()
df.index = df.time
df.rename(columns={
'site_latitude(degrees)': 'latitude',
'site_longitude(degrees)': 'longitude',
'site_elevation(m)': 'elevation',
'aeronet_site': 'siteid'
},
inplace=True)
df.dropna(subset=['latitude', 'longitude'], inplace=True)
df.dropna(axis=1, how='all', inplace=True)
self.df = df
def get_columns(self):
header = pd.read_csv(self.url, skiprows=5, header=None,
nrows=1).values.flatten()
final = ['time']
for i in header:
if "Date(" in i or 'Time(' in i:
pass
else:
final.append(i.lower())
return final
def add_data(self,
dates=None,
product='AOD15',
latlonbox=None,
daily=False,
calc_550=True,
inv_type=None,
freq=None,
siteid=None,
detect_dust=False):
self.latlonbox = latlonbox
self.siteid = siteid
if dates is None: # get the current day
self.dates = pd.date_range(start= | pd.to_datetime('today') | pandas.to_datetime |
#!python
##################################################
# ACCESS QC Module
# Innovation Laboratory
# Center For Molecular Oncology
# Memorial Sloan Kettering Cancer Research Center
# maintainer: <NAME> (<EMAIL>)
#
#
# This module functions as an aggregation step to combine QC metrics
# across Waltz runs on different bam types.
import shutil
import logging
import argparse
import numpy as np
import pandas as pd
from python_tools.constants import *
from python_tools.util import to_csv
def unique_or_tot(x):
if TOTAL_LABEL in x:
return TOTAL_LABEL
else:
return PICARD_LABEL
def get_read_counts_table(path, pool):
"""
This method is only used to generate stats for un-collapsed bams
"""
read_counts_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts = pd.read_csv(read_counts_path, sep='\t')
# Melt our DF to get all values of the on target rate and duplicate rates as values
read_counts = pd.melt(read_counts, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
# We only want the read counts-related row values
read_counts = read_counts[~read_counts['Category'].isin(['bam', TOTAL_READS_COLUMN, UNMAPPED_READS_COLUMN, 'duplicate_fraction'])]
read_counts['method'] = read_counts['Category'].apply(unique_or_tot)
read_counts['pool'] = pool
# read_counts = read_counts.reset_index(drop=True)
return read_counts
def get_read_counts_total_table(path, pool):
"""
This table is used for "Fraction of Total Reads that Align to the Human Genome" plot
"""
full_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_total = pd.read_csv(full_path, sep='\t')
col_idx = ~read_counts_total.columns.str.contains(PICARD_LABEL)
read_counts_total = read_counts_total.iloc[:, col_idx]
read_counts_total['AlignFrac'] = read_counts_total[TOTAL_MAPPED_COLUMN] / read_counts_total[TOTAL_READS_COLUMN]
read_counts_total[TOTAL_OFF_TARGET_FRACTION_COLUMN] = 1 - read_counts_total[TOTAL_ON_TARGET_FRACTION_COLUMN]
read_counts_total['pool'] = pool
return read_counts_total
def get_coverage_table(path, pool):
"""
Coverage table
"""
full_path = os.path.join(path, AGBM_COVERAGE_FILENAME)
coverage_table = pd.read_csv(full_path, sep='\t')
coverage_table = pd.melt(coverage_table, id_vars=SAMPLE_ID_COLUMN, var_name='method', value_name='average_coverage')
coverage_table['method'] = coverage_table['method'].str.replace('average_coverage_', '')
coverage_table['pool'] = pool
return coverage_table
def get_collapsed_waltz_tables(path, method, pool):
"""
Creates read_counts, coverage, and gc_bias tables for collapsed bam metrics.
"""
read_counts_table_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_table = pd.read_csv(read_counts_table_path, sep='\t')
read_counts_table = pd.melt(read_counts_table, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
read_counts_table = read_counts_table.dropna(axis=0)
read_counts_table['method'] = [method] * len(read_counts_table)
read_counts_table['pool'] = pool
# Todo: merge with get_cov_table
coverage_table_path = '/'.join([path, AGBM_COVERAGE_FILENAME])
coverage_table = pd.read_csv(coverage_table_path, sep='\t', usecols=[0, 1], names=[SAMPLE_ID_COLUMN, 'average_coverage'], header=0)
coverage_table['method'] = [method] * len(coverage_table)
coverage_table['pool'] = pool
gc_bias_table = get_gc_table(method, WALTZ_INTERVALS_FILENAME_SUFFIX, path)
return [read_counts_table, coverage_table, gc_bias_table]
def get_gc_table(curr_method, intervals_filename_suffix, path):
"""
Function to create GC content table
"""
gc_with_cov = pd.DataFrame(columns=GC_BIAS_HEADER)
sample_files = [f for f in os.listdir(path) if intervals_filename_suffix in f]
for sample in sample_files:
filename = os.path.join(path, sample)
curr_table = pd.read_csv(filename, names=WALTZ_INTERVALS_FILE_HEADER, sep='\t')
sample = sample.split('_cl_aln_srt')[0]
newDf = curr_table[[WALTZ_INTERVAL_NAME_COLUMN, WALTZ_PEAK_COVERAGE_COLUMN, WALTZ_GC_CONTENT_COLUMN]].copy()
newDf['method'] = curr_method
newDf[SAMPLE_ID_COLUMN] = sample
gc_with_cov = pd.concat([gc_with_cov, newDf]).sort_values([SAMPLE_ID_COLUMN, WALTZ_INTERVAL_NAME_COLUMN])
return gc_with_cov
def get_bins(tbl):
"""
Create bins from min_gc value to max_gc value in increments of 0.05 (for GC content table)
"""
logging.info('GC table generation')
logging.info(tbl)
min_gc = np.min(tbl['gc'])
max_gc = np.max(tbl['gc'])
start = round(min_gc - np.mod(min_gc, 0.05), 2)
stop = round(max_gc + 0.1 - np.mod(max_gc, 0.05), 2)
all_bins = np.arange(start, stop, step=0.05)
return all_bins
def get_gc_table_average_for_each_sample(tbl):
"""
Creates the GC content table, with each sample represented
"""
tbl = tbl.copy()
# Restrict to just 0.3 --> 0.8 %GC
all_bins = np.arange(0.3, 0.85, 0.05)
tbl[GC_BIN_COLUMN] = pd.cut(tbl['gc'], all_bins)
# Create new column of normalized coverage across intervals, for each combination of sample and method
groups = [METHOD_COLUMN, SAMPLE_ID_COLUMN]
grouped = tbl.groupby(groups)['peak_coverage']
tbl['coverage_norm'] = grouped.transform(lambda x: x / x.mean())
# Upgrading to newer pandas requires us to restrict transform operations to only rows with non-NA values
tbl = tbl[~tbl[GC_BIN_COLUMN].isnull()]
# Calculate mean coverage within each GC bin, after standardizing coverage across whole sample
groups = [METHOD_COLUMN, SAMPLE_ID_COLUMN, GC_BIN_COLUMN]
grouped = tbl.groupby(groups)['coverage_norm']
tbl['coverage_norm_2'] = grouped.transform(lambda x: x.mean())
tbl = tbl[[SAMPLE_ID_COLUMN, 'coverage_norm_2', GC_BIN_COLUMN, METHOD_COLUMN]].copy()
tbl = tbl.drop_duplicates()
tbl = tbl.rename(index=str, columns={'coverage_norm_2': 'coverage'})
tbl = tbl[~tbl.isnull().any(axis=1)]
return tbl
def get_gene_and_probe(interval):
gene_interval_regex = re.compile(r'^.*_.*_.*_.*$')
# Example interval string: exon_AKT1_4a_1
if interval[0:4] == 'exon':
split = interval.split('_')
return split[1], split[2] + '_' + split[3]
# Another example I've encountered: 426_2903_324(APC)_1a
elif gene_interval_regex.match(interval):
split = interval.split('_')
return '_'.join(split[0:2]), '_'.join(split[2:4])
else:
gene, exon = interval.split('_exon_')
return gene, exon
def get_coverage_per_interval(tbl):
"""
Creates table of collapsed coverage per interval
"""
# Coverage per interval Graph comes from unfiltered Bam, Pool A Targets
unfiltered_boolv = (tbl['method'] == UNFILTERED_COLLAPSING_METHOD)
# Filter out MSI & Fingerprinting intervals
exon_boolv = ['exon' in y for y in tbl[WALTZ_INTERVAL_NAME_COLUMN]]
relevant_coverage_columns = [WALTZ_PEAK_COVERAGE_COLUMN, WALTZ_INTERVAL_NAME_COLUMN, SAMPLE_ID_COLUMN]
final_tbl = tbl[unfiltered_boolv & exon_boolv][relevant_coverage_columns]
# Add on new gene and probe columns
gene_probe = [get_gene_and_probe(val) for val in final_tbl[WALTZ_INTERVAL_NAME_COLUMN]]
gene_probe_df = pd.DataFrame(gene_probe, columns=['Gene', 'Probe'])
# Todo: most likely, the reset_index() calls are unnecessary
final_tbl = final_tbl.reset_index(drop=True)
final_tbl = pd.concat([final_tbl, gene_probe_df], axis=1)
final_tbl = final_tbl.reset_index(drop=True)
return final_tbl
def get_coverage_per_interval_exon_level(tbl):
"""
Exon-Level Coverage per Interval Graph comes from Duplex Bam, Pool A Targets
"""
total_boolv = (tbl['method'] == DUPLEX_COLLAPSING_METHOD)
final_tbl = tbl[total_boolv]
return final_tbl
########
# Main #
########
def main():
"""
This method is kept separate to allow for testing of the create_combined_qc_tables() method,
using a mock argparse object
:return:
"""
parser = argparse.ArgumentParser(description='MSK ACCESS QC module', formatter_class=argparse.RawTextHelpFormatter)
# Probe-level QC files, A-Targets
parser.add_argument('-swa', '--standard_waltz_pool_a', type=str, default=None, required=True, action=FullPaths)
parser.add_argument('-mua', '--unfiltered_waltz_pool_a', type=str, default=None, action=FullPaths)
parser.add_argument('-msa', '--simplex_waltz_pool_a', type=str, default=None, action=FullPaths)
parser.add_argument('-mda', '--duplex_waltz_pool_a', type=str, default=None, action=FullPaths)
# Probe-level QC files, B-Targets
parser.add_argument('-swb', '--standard_waltz_pool_b', type=str, default=None, required=True, action=FullPaths)
parser.add_argument('-mub', '--unfiltered_waltz_pool_b', type=str, default=None, action=FullPaths)
parser.add_argument('-msb', '--simplex_waltz_pool_b', type=str, default=None, action=FullPaths)
parser.add_argument('-mdb', '--duplex_waltz_pool_b', type=str, default=None, action=FullPaths)
# Exon-level QC files, A-Targets
parser.add_argument('-swael', '--standard_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-muael', '--unfiltered_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-msael', '--simplex_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-mdael', '--duplex_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
args = parser.parse_args()
create_combined_qc_tables(args)
def copy_fragment_sizes_files(args):
"""
Copy the fragment-sizes.txt files from the Waltz output folders, and create a combined table for all bam types
Fragment Sizes graph comes from Unfiltered Bam, Pool A Targets
Todo: not clean
:param args:
:return:
"""
fragment_sizes_files = [
(args.standard_waltz_pool_a, 'Standard_A'),
(args.unfiltered_waltz_pool_a, 'Unfiltered_A'),
(args.simplex_waltz_pool_a, 'Simplex_A'),
(args.duplex_waltz_pool_a, 'Duplex_A'),
(args.standard_waltz_pool_b, 'Standard_B'),
(args.unfiltered_waltz_pool_b, 'Unfiltered_B'),
(args.simplex_waltz_pool_b, 'Simplex_B'),
(args.duplex_waltz_pool_b, 'Duplex_B'),
]
fragment_sizes_files = [(outname, x[0], x[1]) for outname, x in zip(INSERT_SIZE_OUTPUT_FILE_NAMES, fragment_sizes_files)]
for dst, src, type in fragment_sizes_files:
# Copy to current directory of all aggregated QC info
frag_sizes_path = os.path.join(src, 'fragment-sizes.txt')
# Create combined DataFrame for A and B targets
fragment_sizes_df = pd.read_csv(frag_sizes_path, sep='\t')
fragment_sizes_df = fragment_sizes_df[['FragmentSize', 'TotalFrequency', SAMPLE_ID_COLUMN]]
fragment_sizes_df = fragment_sizes_df.pivot('FragmentSize', SAMPLE_ID_COLUMN, 'TotalFrequency')
# Add in missing rows for insert sizes that weren't represented
new_index = pd.Index(np.arange(1, 800), name='FragmentSize')
fragment_sizes_df = fragment_sizes_df.reindex(new_index).reset_index()
# Replace nan's with 0
fragment_sizes_df = fragment_sizes_df.fillna(0)
to_csv(fragment_sizes_df,os.path.join('.', dst))
def reformat_exon_targets_coverage_file(coverage_per_interval_table):
"""
DMP-specific format for coverage_per_interval_table file
# Todo:
# 1. Need to use average_coverage, not peak_coverage
:param coverage_per_interval_table:
:return:
"""
for method in coverage_per_interval_table[METHOD_COLUMN].unique():
subset = coverage_per_interval_table[coverage_per_interval_table['method'] == method]
subset = subset.pivot('interval_name', SAMPLE_ID_COLUMN, 'peak_coverage')
subset = subset.reset_index().rename(columns={subset.index.name: 'interval_name'})
interval_names_split = subset['interval_name'].str.split(':', expand=True)
# Turn interval_name into Interval and TargetName
subset.insert(0, 'TargetName', interval_names_split.iloc[:,0] + '_' + interval_names_split.iloc[:,2])
subset.insert(0, 'Interval', interval_names_split.iloc[:,3] + ':' + interval_names_split.iloc[:,4])
subset = subset.drop('interval_name', axis=1)
to_csv(subset, 'coverage_per_interval_A_targets_{}.txt'.format(method.replace(' ', '_')))
def create_combined_qc_tables(args):
"""
Read in and concatenate all the tables from their respective waltz output folders
Write these tables to the current directory
:param args: argparse.ArgumentParser with parsed arguments
:return:
"""
read_counts_total_pool_a_table = get_read_counts_total_table(args.standard_waltz_pool_a, POOL_A_LABEL)
read_counts_total_pool_b_table = get_read_counts_total_table(args.standard_waltz_pool_b, POOL_B_LABEL)
read_counts_total_table = | pd.concat([read_counts_total_pool_a_table, read_counts_total_pool_b_table]) | pandas.concat |
from copy import deepcopy
from pathlib import Path
import pandas as pd
from pymongo import MongoClient
import projectconfig
from relnet.evaluation.file_paths import FilePaths
from relnet.state.network_generators import get_graph_ids_to_iterate
class EvaluationStorage:
MONGO_EXPERIMENT_COLLECTION = 'experiment_data'
MONGO_EVALUATION_COLLECTION = 'evaluation_data'
def __init__(self):
config = projectconfig.get_project_config()
self.mongo_client = MongoClient(config.BACKEND_URL)
self.db = self.mongo_client[config.MONGODB_DATABASE_NAME]
def find_latest_experiment_id(self):
result = self.db[self.MONGO_EXPERIMENT_COLLECTION].find().sort([("started_millis", -1)]).limit(1)[0]["experiment_id"]
return result
def get_hyperparameter_optimisation_data(self,
experiment_id,
model_seeds_to_skip,
train_individually):
latest_experiment = self.get_experiment_details(experiment_id)
file_paths = latest_experiment["file_paths"]
experiment_conditions = latest_experiment["experiment_conditions"]
hyperopt_data = []
network_generators = latest_experiment["network_generators"]
objective_functions = latest_experiment["objective_functions"]
agent_names = latest_experiment["agents"]
param_spaces = latest_experiment["parameter_search_spaces"]
for objective_function in objective_functions:
for agent_name in agent_names:
agent_grid = param_spaces[objective_function][agent_name]
search_space_keys = list(agent_grid.keys())
for hyperparams_id in search_space_keys:
for seed in experiment_conditions['experiment_params']['model_seeds']:
for network_generator in network_generators:
graph_ids_to_iterate = get_graph_ids_to_iterate(train_individually, network_generator, file_paths)
for graph_id in graph_ids_to_iterate:
setting = (network_generator, objective_function, agent_name, graph_id)
if setting in model_seeds_to_skip:
if seed in model_seeds_to_skip[setting]:
print(f"Skipping seed {seed} when computing optimal hyperparams.")
continue
model_prefix = FilePaths.construct_model_identifier_prefix(agent_name,
objective_function,
network_generator,
seed,
hyperparams_id,
graph_id=graph_id)
hyperopt_result_filename = FilePaths.construct_best_validation_file_name(model_prefix)
hyperopt_result_path = Path(file_paths['hyperopt_results_dir'], hyperopt_result_filename)
if hyperopt_result_path.exists():
with hyperopt_result_path.open('r') as f:
avg_eval_reward = float(f.readline())
hyperopt_data_row = {"network_generator": network_generator,
"objective_function": objective_function,
"agent_name": agent_name,
"hyperparams_id": hyperparams_id,
"avg_reward": avg_eval_reward,
"graph_id": graph_id}
hyperopt_data.append(hyperopt_data_row)
return param_spaces, pd.DataFrame(hyperopt_data)
def retrieve_optimal_hyperparams(self,
experiment_id,
model_seeds_to_skip,
train_individually):
param_spaces, df = self.get_hyperparameter_optimisation_data(experiment_id,
model_seeds_to_skip,
train_individually)
if not train_individually:
df = df.drop(columns='graph_id')
avg_rewards_df = df.groupby(list(set(df.columns) - {"avg_reward"})).mean().reset_index()
gb_cols = list(set(avg_rewards_df.columns) - {"avg_reward", "hyperparams_id"})
avg_rewards_max = avg_rewards_df.loc[avg_rewards_df.groupby(gb_cols)["avg_reward"].idxmax()].reset_index(
drop=True)
optimal_hyperparams = {}
for row in avg_rewards_max.itertuples():
if not train_individually:
setting = row.network_generator, row.objective_function, row.agent_name
else:
setting = row.network_generator, row.objective_function, row.agent_name, row.graph_id
optimal_id = row.hyperparams_id
optimal_hyperparams[setting] = param_spaces[row.objective_function][row.agent_name][optimal_id], optimal_id
return optimal_hyperparams
def get_evaluation_data(self, experiment_id):
eval_data = self.db[self.MONGO_EVALUATION_COLLECTION].find({"experiment_id": experiment_id})
all_results_rows = []
for eval_item in eval_data:
all_results_rows.extend(list(eval_item['results_rows']))
return all_results_rows
def remove_evaluation_data(self, experiment_id):
self.db[self.MONGO_EVALUATION_COLLECTION].remove({"experiment_id": experiment_id})
def insert_experiment_details(self,
file_paths,
experiment_conditions,
started_str,
started_millis,
parameter_search_spaces,
experiment_id):
all_experiment_details = {}
all_experiment_details['experiment_id'] = experiment_id
all_experiment_details['started_datetime'] = started_str
all_experiment_details['started_millis'] = started_millis
all_experiment_details['file_paths'] = {k: str(v) for k, v in dict(vars(file_paths)).items()}
conds = dict(vars(deepcopy(experiment_conditions)))
del conds["agents_models"]
del conds["agents_baseline"]
del conds["relevant_agents"]
del conds["objective_functions"]
del conds["network_generators"]
del conds["model_seeds_to_skip"]
all_experiment_details['experiment_conditions'] = conds
all_experiment_details['agents'] = [agent.algorithm_name for agent in experiment_conditions.relevant_agents]
all_experiment_details['objective_functions'] = [obj.name for obj in experiment_conditions.objective_functions]
all_experiment_details['network_generators'] = [network_generator.name for network_generator in experiment_conditions.network_generators]
all_experiment_details['parameter_search_spaces'] = parameter_search_spaces
import pprint
pprint.pprint(all_experiment_details)
self.db[self.MONGO_EXPERIMENT_COLLECTION].insert_one(all_experiment_details)
return all_experiment_details
def get_experiment_details(self, experiment_id):
return self.db[self.MONGO_EXPERIMENT_COLLECTION].find(
{"experiment_id": {"$eq": experiment_id}}).limit(1)[0]
def update_with_hyperopt_results(self, experiment_id, optimisation_result):
self.db[self.MONGO_EXPERIMENT_COLLECTION].update_one({"experiment_id": experiment_id},
{"$set": {"optimisation_result": optimisation_result}})
def insert_evaluation_results(self, experiment_id, results_rows):
self.db[self.MONGO_EVALUATION_COLLECTION].insert_one({"experiment_id": experiment_id,
"results_rows": results_rows})
def fetch_all_eval_curves(self, agent_name, hyperparams_id, file_paths, objective_functions, network_generators, model_seeds, train_individually):
all_dfs = []
for obj_fun_name in objective_functions:
for net_gen_name in network_generators:
all_dfs.append(self.fetch_eval_curves(agent_name, hyperparams_id, file_paths, obj_fun_name, net_gen_name, model_seeds, train_individually))
return pd.concat(all_dfs)
def fetch_eval_curves(self, agent_name, hyperparams_id, file_paths, objective_function, network_generator, model_seeds, train_individually):
eval_histories_dir = file_paths.eval_histories_dir
if len(list(eval_histories_dir.iterdir())) == 0:
return pd.DataFrame()
data_dfs = []
for seed in model_seeds:
graph_ids = get_graph_ids_to_iterate(train_individually, network_generator, file_paths)
for idx, g_id in enumerate(graph_ids):
model_identifier_prefix = file_paths.construct_model_identifier_prefix(agent_name, objective_function, network_generator, seed, hyperparams_id, graph_id=g_id)
filename = file_paths.construct_history_file_name(model_identifier_prefix)
data_file = eval_histories_dir / filename
if data_file.exists():
eval_df = pd.read_csv(data_file, sep=",", header=None, names=['timestep', 'perf'], usecols=[0,2])
model_seed_col = [seed] * len(eval_df)
eval_df['model_seed'] = model_seed_col
eval_df['objective_function'] = [objective_function] * len(eval_df)
eval_df['network_generator'] = [network_generator] * len(eval_df)
if g_id is not None:
eval_df['graph_id'] = [g_id] * len(eval_df)
data_dfs.append(eval_df)
all_data_df = | pd.concat(data_dfs) | pandas.concat |
# =============================================================================
# Liberaries
# =============================================================================
import pandas as pd
import os # access files and so on
import sys # for handling exceptions
import re # for checking letter in a string
import numpy as np
import random
import time
import xlrd
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
import solcast
from opencage.geocoder import OpenCageGeocode
import datetime
import math
import json
import urllib.request
from datetime import datetime, timezone
class UCRPMU:
def __init__(self, dst):
self.dst = dst
self.each_day_horizon = 12*24 # each 5 minutes
def get_clean_data(self):
whole_data = | pd.DataFrame(columns=["DateTime", "RealPower", "ReactivePower", "ApprentPower"]) | pandas.DataFrame |
from typing import Dict, List, Tuple
import pandas as pd
import glob, os, logging, datetime, re, copy
import pandas_datareader as pdr
from textblob import TextBlob
from pathlib import Path
import time
class NLPError(Exception): pass
DATE_CUTOFF = '2020-07-14'
COMPANIES_KEYWORDS = {
'AAPL': [' Apple ','Iphone','MacOS','Ipod','Ipad','AirPods','HomePod',
'<NAME>','<NAME>', '<NAME>','<NAME>',' <NAME>',
'<NAME>','Apple Park','Silicon Valley', 'Apple watch','Apple pay',
' IOS ','Safari','iTunes','Big Tech','Tech Giant','Big Four','Four Horsemen',
'Big Five','S&P 5','AAPL','Apple TV','Macintosh','Siri','Shazam'],
'FB': ['FaceBook','<NAME>','<NAME>','<NAME>','Messenger',
'Instagram',' FB ',' IG ','WhatsApp','InstaStory','Facebook Dating','Oculus',
'Giphy','MapiLLary', 'Menlo Park','Silicon Valley','Big Tech','Tech Giant',
'Big Four','Four Horsemen','Big Five','S&P 5'],
'TWTR': ['Twitter','Tweet','<NAME>','<NAME>','Biz Stone',
'<NAME>', '<NAME>','<NAME>','Parag Agrawal',
'TweetDeck',' Vine ','Periscope','MoPub','TWTR'],
'GOOG': ['Google','Alphabet','Silicon Valley','Big Tech','Tech Giant','Big Four',
'Four Horsemen','Big Five','S&P 5','Googleplex','Larry Page','<NAME>',
'<NAME>','<NAME>', '<NAME>','DeepMind','Chrome',
'Youtube',' YT ','TensorFlow','Android','Nexus'],
}
SETTINGS = {
'format': '%(asctime)s | %(levelname)s | %(funcName)s | %(lineno)s | %(message)s',
'log_file': '/tools.log',
'log_folder': os.getcwd()+'/log',
}
Path(SETTINGS['log_folder']).mkdir(parents=True, exist_ok=True)
logging.basicConfig(
filename=SETTINGS['log_folder'] + SETTINGS['log_file'],
filemode='a',
format=SETTINGS['format'],
level=logging.INFO
)
def preprocess_raw_datasets(all_tweets: pd.DataFrame, yahoo_data: dict) -> Dict[str, pd.DataFrame]:
if not all(k in COMPANIES_KEYWORDS.keys() for k in yahoo_data.keys()):
raise NLPError('Keys in yahoo_data do not match with companies')
all_tweets.drop(columns=['Id'], inplace=True)
tweets_by_company = get_tweets_by_company(all_tweets)
sentimeted_data = sentiment_stock_combine(tweets_by_company, yahoo_data)
return sentimeted_data
def get_tweets_by_company(all_tweets: pd.DataFrame) -> Dict[str, pd.DataFrame]:
print("-> Filtering and grouping tweets dataframe...")
t0 = time.time()
combined_dfs = {}
columns = ['Text', 'Date', 'Nick', 'Shares', 'Likes']
for company, keywords in COMPANIES_KEYWORDS.items():
tmp_mask = all_tweets.Text.apply(lambda content: create_mask(content, keywords))
filtered = all_tweets[tmp_mask]
current = combined_dfs.get(company, pd.DataFrame(columns=columns))
combined_dfs[company] = pd.concat([current, filtered], ignore_index=True)
del tmp_mask, current, filtered
for k, v in combined_dfs.items():
v.Text = v.Text.apply(lambda x: " ".join(re.sub("([^0-9A-Za-z \t])|(\w+://\S+)", "", x).split()))
v.Date = v.Date.apply(lambda x: x.split(' ')[0])
v.Likes = | pd.to_numeric(v.Likes) | pandas.to_numeric |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dependency
----------------------------------
Dependency analysis class
Created on Nov 8, 2018
Last edited on Nov 8, 2018
@author: <NAME>
"""
import os
import io
import sys
import datetime
import numpy as np
from IPython import embed
import pandas as pd
import logging
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import re
import subprocess
import yaml
from glob import glob
import scipy
from statsmodels.robust.scale import mad
from collections import Counter
from collections import defaultdict as ddict
from sklearn.metrics import roc_curve, average_precision_score, f1_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection._search import ParameterGrid
import torch
import paths
from biovnn_model import BioVNNmodel
from utils import compute_AUC_bootstrap, plot_pred_true_r_by_gene_MAD, plot_pred_true_r_by_gene_mean, gene_level_cor, \
individual_auc, plot_ROC, plot_top_ROC, plot_hist_cor, plot_hist_auc
disease_mapping = {'Bladder Cancer': 'BLCA',
'Breast Cancer': 'BRCA',
'breast': 'BRCA',
'Cervical Cancer': 'CESC',
'Colon Cancer': 'COAD',
'Colon/Colorectal Cancer': 'COAD',
'colorectal': 'COAD',
'GBM/Brain Cancer': 'GBM',
'glioblastoma': 'GBM',
'Head and Neck Cancer': 'HNSC',
'upper_aerodigestive': 'HNSC',
'Liver Cancer': 'LIHC',
'liver': 'LIHC',
'Ovarian Cancer': 'OV',
'ovary': 'OV',
'Skin Cancer': 'SKCM',
'skin': 'SKCM',
'Gastric Cancer': 'STAD',
'Soft Tissue/ Thyroid Cancer': 'THCA',
'Thyroid Cancer': 'THCA',
'Endometrial Cancer': 'UCEC',
'Endometrial/Uterine Cancer': 'UCEC',
'uterus': 'UCEC',
'Esophageal Cancer': 'ESCA',
'esophagus': 'ESCA',
'Pancreatic Cancer': 'PAAD',
'pancreas': 'PAAD',
'Non-Small Cell Lung Cancer (NSCLC), Adenocarcinoma': 'LUAD',
'Non-Small Cell Lung Cancer (NSCLC), Squamous Cell Carcinoma': 'LUSC',
'Renal Carcinoma, clear cell': 'KIRC',
'Glioblastoma': 'GBM',
'Acute Myelogenous Leukemia (AML)': 'LAML',
'AML': 'LAML'}
def load_params(output_dir=None, param_f=None):
if param_f is None:
param_f = os.path.join(output_dir, 'param.yaml')
with open(param_f, 'r') as stream:
params = yaml.safe_load(stream)
return params
def save_params(output_dir, params):
with io.open(os.path.join(output_dir, 'param.yaml'), 'w', encoding='utf8') as outfile:
yaml.dump(params, outfile, default_flow_style=False, allow_unicode=True)
assert params == load_params(output_dir)
class Dependency(object):
def __init__(self, cancer_type, data_dir, result_dir, run_name, params,
depmap_ver='19Q3', use_hierarchy=True):
self.method = 'BioVNN'
self.cancer_type = cancer_type
self.n_cluster = None
self.run_name = run_name
self.patient_list = []
self.cancer_type_to_patients = ddict(list)
self.rna_dir = os.path.join(data_dir, 'DepMap', depmap_ver)
self.data_dir = data_dir
if 'ref_groups' in params and params['ref_groups'] == 'GO':
self.community_file = os.path.join(data_dir, 'GO', 'goa_human_20201212.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'GO', 'go_20201212_relation.txt')
else:
self.community_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'Reactome', 'ReactomePathwaysRelation.txt')
self.gene_id_file = os.path.join(self.data_dir, 'Reactome', 'Homo_sapiens_9606.gene_info')
self.gene_id_dict = pd.read_csv(self.gene_id_file, sep='\t', index_col=1)['Symbol'].to_dict()
self.Reactome_name_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.txt')
self.Reactome_name_dict = pd.read_csv(self.Reactome_name_file, sep='\t', index_col=0, header=None)[1].to_dict()
self.Reactome_reaction_file = os.path.join(self.data_dir, 'Reactome', 'NCBI2Reactome_PE_Reactions_human.txt')
self.Reactome_reaction_df = pd.read_csv(self.Reactome_reaction_file, sep='\t', index_col=None, header=None)
self.Reactome_gene_reaction_dict = ddict(list)
self.Reactome_reaction_gene_dict = ddict(list)
for i, row in self.Reactome_reaction_df.iterrows():
if 'HSA' in row[1] and 'HSA' in row[3]: # Make sure they are from human
if row[0] in self.gene_id_dict:
symbol = self.gene_id_dict[row[0]]
else:
symbol = row[2].split(' [')[0]
self.Reactome_gene_reaction_dict[symbol].append(row[3])
self.Reactome_reaction_gene_dict[row[3]].append(symbol)
self.community_dict = {}
self.community_hierarchy = []
self.community_hierarchy_all = None
self.community_hierarchy_random = []
self.community_hierarchy_random_all = None
self.community_hierarchy_ones = []
self.community_hierarchy_ones_all = None
self.community_hierarchy_dicts_all = {}
self.use_hierarchy = use_hierarchy
self.community_matrix = None
self.result_path = os.path.join(result_dir, self.__class__.__name__, run_name)
self.temp_path = os.path.join(result_dir, self.__class__.__name__, 'temp')
os.makedirs(self.result_path, exist_ok=True)
os.makedirs(self.temp_path, exist_ok=True)
self._dependency_classes = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Prospective',
'Timestamped', 'Interpret', 'Interpret_ts']
self._dependency_classes_plot = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Timestamped']
self.params = params
self.load_result = params.get('load_result', False)
self.load_result_dir_name = params.get('load_result_dir_name', False)
if self.load_result and self.load_result_dir_name:
if 'load_result_dir_suffix' in params:
if 'load_result_dir_full' in params:
if params['load_result_dir_full']:
self.load_result_dir = params['load_result_dir_suffix']
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = '/'.join(self.result_path.split('/')[:-1] + [self.load_result_dir_name])
params = load_params(self.load_result_dir)
if 'run_mode' in self.params:
run_mode = self.params['run_mode']
else:
run_mode = None
self.params.update(params)
params = self.params
if run_mode:
params['run_mode'] = run_mode
self.params['run_mode'] = run_mode
self.use_cuda = params.get('use_cuda', True)
self.data_types = params.get('data_types', ['rna'])
self.use_all_gene = params.get('use_all_gene', True)
self.exp_ratio_min = params.get('exp_ratio_min', 0.01)
self.feature_max = params.get('feature_max', 99999)
self.feature_per_group_max = params.get('feature_per_group_max', 100)
self.repeat_n = params.get('repeat_n', 1)
self.fold_n = params.get('fold_n', 5)
self.cv_fold = params.get('cv_fold', 0)
self.model_v = params.get('model_v', 'clh_v1')
self.cv_fold_only_run = params.get('cv_fold_only_run', 1)
self.other_cancer_types = params.get('other_cancer_types', [])
self.rna_top_n_std = params.get('rna_top_n_std', 10000)
self.community_affected_size_min = params.get('community_affected_size_min', 5)
self.community_affected_size_max = params.get('community_affected_size_max', 999999)
self.require_label_gene_in_gene_group = params.get('require_label_gene_in_gene_group', True)
self.clip_Xval_Xtest = params.get('clip_Xval_Xtest', [-1, 1])
self.use_MinMaxScaler = params.get('use_MinMaxScaler', False)
self.use_StandardScaler = params.get('use_StandardScaler', True)
self.use_tanh_feature = params.get('use_tanh_feature', False)
self.use_sigmoid_feature = params.get('use_sigmoid_feature', False)
self.use_community_filter = params.get('use_community_filter', True)
self.test_run = params.get('test_run', False)
self.select_genes_in_label = params.get('select_genes_in_label', 'dgidb_w_interaction')
self.use_classification = params.get('use_classification', True)
self.use_binary_dependency = params.get('use_binary_dependency', True)
self.use_class_weights = params.get('use_class_weights', True)
self.use_normalized_class_weights = params.get('use_normalized_class_weights', False)
self.use_sample_class_weights = params.get('use_sample_class_weights', False)
self.use_normalized_sample_class_weights = params.get('use_normalized_sample_class_weights', True)
self.use_all_dependency_gene = params.get('use_all_dependency_gene', True)
self.use_all_feature_for_random_group = params.get('use_all_feature_for_random_group', False)
self.use_all_feature_for_fully_net = params.get('use_all_feature_for_fully_net', False)
self.use_deletion_vector = params.get('use_deletion_vector', True)
self.use_consistant_groups_for_labels = params.get('use_consistant_groups_for_labels', False)
self.run_mode = params.get('run_mode',
'ref') # Could be ref, random_predictor, random, expression_control or full
self.random_group_permutation_ratio = params.get('random_group_permutation_ratio', 1)
self.random_group_hierarchy_permutation_ratio = params.get('random_group_hierarchy_permutation_ratio', 1)
self.random_group_permutation_seed = params.get('random_group_permutation_seed', 9527)
self.leaf_group_gene_in_label_max = params.get('leaf_group_gene_in_label_max', 50)
self.split_by_cancer_type = params.get('split_by_cancer_type', True)
self.save_model_ckpt = params.get('save_model_ckpt', True)
self.output_pred_small = ['RPS20', 'MYC', 'MYCN', 'PIK3CA']
self.GSP_min = params.get('GSP_min', 6)
self.GSN_min = params.get('GSN_min', 6)
self.gene_list = None
self.gene_list_name = None
self.accuracy = None
self.f1 = None
self.confusion_mat = None
self.mcc = None
self.pearson_r = None
self.spearman_rho = None
self.mse = None
self.feature_importance = []
metrics = ['accuracy', 'confusion_mat', 'f1', 'mcc', 'pearson_r', 'spearman_rho', 'mse', 'pearson_r2',
'AUC', 'PR']
data_splits = ['train', 'val', 'test']
for x in metrics:
self.__dict__[x] = ddict(dict)
for z in range(self.repeat_n + 1):
self.__dict__[x][z] = ddict(dict)
for y in data_splits:
self.__dict__[x][z][y] = ddict(list)
for x in ['pred', 'idx']:
self.__dict__[x] = ddict(dict)
for y in data_splits:
self.__dict__[x][y] = ddict(list)
self.metric_output = {}
for y in data_splits:
self.metric_output[y] = pd.DataFrame()
self.save_load_data = ['rna']
self.depmap_ver = depmap_ver
os.makedirs(self.rna_dir, exist_ok=True)
self.save_load_data = ['rna', 'dependency']
self.hdf5_df_file = os.path.join(self.temp_path,
'df_{}_depmap_{}.hdf5'.format('_'.join(sorted(self.data_types)),
self.depmap_ver))
def prepare_data(self):
self.load_communities()
self.load_known_genes()
self.load_selected_gene_list()
if not self.load_data():
self.load_dependency()
if 'rna' in self.data_types:
self.load_rna()
self.save_data()
self.align_data()
def load_selected_gene_list(self):
if isinstance(self.select_genes_in_label, str):
if self.select_genes_in_label.lower() == 'dgidb_w_interaction':
dgidb_file = os.path.join(self.data_dir, 'DGIdb_genes_w_interactions.txt')
else:
raise ValueError("Cannot recongnize select_genes_in_label {}".format(self.select_genes_in_label))
self.select_genes_in_label = pd.read_csv(dgidb_file, header=None)[0].tolist()
elif 'ref_leaf_group' in self.run_mode:
if isinstance(self.select_genes_in_label, list):
leaf_communities, df = self.load_leaf_communities()
initial_select = set(self.select_genes_in_label)
initial_n = len(initial_select)
logging.info("Selected genes {} were used to find additional genes in the same leaf gene groups".format(
self.select_genes_in_label))
leaf_communities_with_genes = {}
for group in leaf_communities:
if len(initial_select.intersection(self.community_dict[group])) > 0:
leaf_communities_with_genes[group] = len(self.community_dict[group])
# Select leaf groups from small to large groups until it reaches the self.leaf_group_gene_in_label_max
for group, size in sorted(leaf_communities_with_genes.items(), key=lambda x: x[1]):
if len(initial_select | set(self.community_dict[group])) < self.leaf_group_gene_in_label_max:
initial_select |= set(self.community_dict[group])
logging.info("{} gene group was added as genes in labels".format(group))
self.select_genes_in_label = sorted(list(initial_select))
logging.info(
"Additional {} genes in the same leaf gene groups with selected genes were added".format(
len(self.select_genes_in_label) - initial_n))
def save_label_genes(self, genes):
"""Save label genes to file."""
fout = open(os.path.join(self.result_path, 'dependency_genes.tsv'), 'w')
for x in genes:
fout.write('{}\n'.format(x))
fout.close()
def save_communities(self, d=None):
"""Save community genes to file."""
if d is None:
fout = open(os.path.join(self.result_path, 'community_list.tsv'), 'w')
d = self.community_dict
s = ''
else:
fout = open(os.path.join(self.result_path, 'community_random_list.tsv'), 'w')
s = '_random'
for k, v in d.items():
fout.write('{}\n'.format('\t'.join([k + s] + v)))
fout.close()
def save_data(self):
hf = pd.HDFStore(self.hdf5_df_file)
for x in self.save_load_data:
if x in self.__dict__:
hf[x] = self.__dict__[x]
hf['data_types'] = pd.DataFrame(self.data_types)
# hf['other_cancer_types'] = pd.DataFrame(self.other_cancer_types)
# hf['cancer_type'] = pd.DataFrame([self.cancer_type])
# for ct in set(self.cancer_type_to_patients.keys()) | set(self.other_cancer_types) | set([self.cancer_type]):
# hf[ct] = pd.DataFrame(self.cancer_type_to_patients[ct])
# if 'cancer_type_to_patients_target' in self.__dict__:
# for ct in set(self.cancer_type_to_patients_target.keys()) | set(self.other_cancer_types) | set(
# [self.cancer_type]):
# hf[ct + '_target'] = pd.DataFrame(self.cancer_type_to_patients_target[ct])
hf.close()
def load_data(self):
if os.path.isfile(self.hdf5_df_file):
hf = pd.HDFStore(self.hdf5_df_file)
try:
for x in self.save_load_data:
if x in hf:
self.__dict__[x] = hf[x]
self.__dict__[x + '_all'] = self.__dict__[x].copy()
logging.info("Loaded data from existing hdf5 file.")
hf.close()
return True
except:
logging.info(
"Current Data types, Cancer type or Other cancer types do not match that of existing hdf5 file.")
hf.close()
return False
else:
return False
def load_communities(self, load_original=True):
"""Parses out a geneset from file."""
if self.load_result and not load_original:
lines = open('{}/community_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
else:
lines = open('{}'.format(self.community_file)).readlines()
if 'pathway' in self.community_file.lower():
ind_key = 1
ind_gene = 3
elif self.community_file.lower().endswith('.gmt'):
ind_key = 1
ind_gene = 3
else:
ind_key = 0
ind_gene = 1
self.community_genes = set()
self.community_dict = {}
self.gene_community_dict = ddict(list)
self.community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
self.community_dict[line[ind_key]] = line[ind_gene:]
self.community_size_dict[line[ind_key]] = len(line[ind_gene:])
self.community_genes |= set(line[ind_gene:])
for g in line[ind_gene:]:
self.gene_community_dict[g].append(line[ind_key])
def load_random_communities(self, load_original=True):
"""Parses out a geneset from file."""
lines = open('{}/community_random_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
self.random_community_genes = set()
self.community_dict_random = {}
self.random_community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
group = line[ind_key].split('_')[0]
self.community_dict_random[group] = line[ind_gene:]
self.random_community_size_dict[group] = len(line[ind_gene:])
self.random_community_genes |= set(line[ind_gene:])
def load_leaf_communities(self):
f = self.community_hierarchy_file
# The first column 0 is the parent and the second column 1 is the child
df = pd.read_csv(f, sep='\t', header=None)
if 'Reactome' in f:
df = df.loc[df[0].str.contains('HSA')] # Get human-only pathways
# Make root as the parent of those gene groups without parents
df_root = pd.DataFrame(columns=df.columns)
for x in set(df[0]) - set(df[1]):
if x in self.community_dict or 'GO:' in x:
df_root = pd.concat([df_root, pd.DataFrame(['root', x]).T])
# Remove those relationship of groups not in the analysis
df = df.loc[df[1].isin(self.community_dict.keys()) & df[0].isin(self.community_dict.keys())]
df = pd.concat([df, df_root])
leaf_communities = sorted(list((set(df[1]) - set(df[0])) & set(self.community_dict.keys())))
return leaf_communities, df
def load_random_hierarchy(self):
f = '{}/random_group_hierarchy.tsv'.format(self.load_result_dir)
df = pd.read_csv(f, sep='\t', header=None)
return df
def load_known_genes(self, depmap_ver=None):
if depmap_ver is None:
depmap_ver = self.depmap_ver
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
depmap_dir = os.environ.get('DEPMAP_DIR')
if depmap_ver not in depmap_dir:
depmap_dir = regex.sub(depmap_ver, depmap_dir)
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
depmap_cell_line_file = os.path.join(depmap_dir, 'sample_info.csv')
else:
depmap_cell_line_file = os.path.join(depmap_dir, 'DepMap-20{}-celllines.csv'.format(depmap_ver.lower()))
self.cell_line_metadata = pd.read_csv(depmap_cell_line_file)
self.cell_line_metadata = self.cell_line_metadata.set_index('DepMap_ID')
try:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE_Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['Primary Disease'].to_dict()
except:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['lineage'].to_dict()
try:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['Subtype Disease'].to_dict()
except:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['lineage_subtype'].to_dict()
self.cell_line_id_mapping = ddict(lambda: None, self.cell_line_id_mapping)
self.cell_line_id_pri_dis = ddict(lambda: None, self.cell_line_id_pri_dis)
self.cell_line_id_sub_dis = ddict(lambda: None, self.cell_line_id_sub_dis)
def load_dependency(self, depmap_ver=None, dep_data_type='Dependency'):
depmap_genetic_vulnerabilities_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genetic_vulnerabilities_dir:
depmap_genetic_vulnerabilities_dir = regex.sub(depmap_ver, depmap_genetic_vulnerabilities_dir)
if dep_data_type == 'CERES':
depmap_file = 'Achilles_gene_effect.csv'
elif dep_data_type == 'Dependency':
depmap_file = 'Achilles_gene_dependency.csv'
self.dependency = pd.read_csv(os.path.join(depmap_genetic_vulnerabilities_dir, depmap_file), header=0,
index_col=0)
self.dependency.columns = [x.split(' (')[0] for x in self.dependency.columns]
self.dependency = self.dependency[sorted(self.dependency.columns)]
# Map cell line id to name
self.dependency.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
self.dependency.index]
self.dependency = self.dependency.loc[sorted(self.dependency.index)]
self.dependency = self.dependency.fillna(0)
def load_rna(self, depmap_ver=None):
depmap_genomic_characterization_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genomic_characterization_dir:
depmap_genomic_characterization_dir = regex.sub(depmap_ver, depmap_genomic_characterization_dir)
depmap_file = 'CCLE_expression.csv'
if '20Q2' in depmap_ver:
sep_str = '\t'
else:
sep_str = ','
self.rna = pd.read_csv(os.path.join(depmap_genomic_characterization_dir, depmap_file), header=0,
index_col=0, sep=sep_str)
self.rna.columns = [x.split(' (')[0] for x in self.rna.columns]
# Merge columns with the same gene symbol
dup_genes = [item for item, count in Counter(self.rna.columns).items() if count > 1]
unique_genes = list(set(self.rna.columns).difference(dup_genes))
RNAseq_gene = self.rna[unique_genes]
for col in set(dup_genes):
RNAseq_gene[col] = self.rna[col].sum(axis=1)
# Map cell line id to name
RNAseq_gene.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
RNAseq_gene.index]
for cell in set(self.dependency.index).intersection(RNAseq_gene.index):
cell_type = self.cell_line_id_pri_dis[cell]
cell_subtype = self.cell_line_id_sub_dis[cell]
if cell_type in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_type]]:
self.cancer_type_to_patients[disease_mapping[cell_type]].append(cell)
elif cell_subtype in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_subtype]]:
self.cancer_type_to_patients[disease_mapping[cell_subtype]].append(cell)
if cell not in self.cancer_type_to_patients[cell_type]:
self.cancer_type_to_patients[cell_type].append(cell)
self.rna = RNAseq_gene
self.rna = self.rna[sorted(self.rna.columns)]
self.rna = self.rna.loc[sorted(self.rna.index)]
self.rna_all = self.rna.copy()
def _subset_samples(self):
# Get overlapping patients among data types
overlapping_patients = set(self.dependency.index)
for x in self.data_types:
# Get patient ID
overlapping_patients &= set(self.__dict__[x].index)
if self.cancer_type == 'PANC':
selected_samples = sorted(list(overlapping_patients))
else:
selected_samples = sorted(list(set(self.cancer_type_to_patients[self.cancer_type])))
overlapping_patients &= set(selected_samples)
overlapping_patients = sorted(list(overlapping_patients))
for x in self.data_types:
self.__dict__[x] = self.__dict__[x].loc[overlapping_patients]
self.dependency = self.dependency.loc[overlapping_patients]
logging.info("Total {} samples have {} and dependency data".format(
len(overlapping_patients), " ".join(self.data_types)))
def _subset_target_genes(self):
try:
self.genes_in_label = pd.read_csv(self.load_result_dir + '/dependency_genes.tsv', sep='\t', header=None)
self.genes_in_label = list(self.genes_in_label.values.T[0])
except:
if self.use_all_dependency_gene:
self.genes_in_label = sorted(list(set(self.community_genes).intersection(self.dependency.columns)))
else:
self.genes_in_label = sorted(list(set(self.genes).intersection(self.dependency.columns)))
if len(self.select_genes_in_label) > 0:
self.genes_in_label = sorted(list(set(self.genes_in_label).intersection(self.select_genes_in_label)))
genes_not_found = set(self.select_genes_in_label).difference(self.genes_in_label)
logging.debug("Genes not found: {}".format(genes_not_found))
if 'Timestamped' not in self.__class__.__name__:
logging.info("{} out of {} selected genes are in dependency data.".format(
len(self.genes_in_label) - len(genes_not_found),
len(self.select_genes_in_label)))
gsp_total = (self.dependency[self.genes_in_label] >= 0.5).sum()
cond = (gsp_total >= self.GSP_min) & (self.dependency.shape[0] - gsp_total >= self.GSN_min)
cond_col = sorted([y for x, y in zip(cond, cond.index) if x])
logging.info("{} genes have at least {} gold standard positives and {} negatives".format(len(cond_col),
self.GSP_min,
self.GSN_min))
self.dependency = self.dependency[cond_col]
self.genes_in_label = cond_col
self.gsp_n = (self.dependency >= 0.5).sum().sum()
self.gsn_n = (self.dependency < 0.5).sum().sum()
if self.use_classification:
logging.info("Positive:negative samples = {}:{}".format(self.gsp_n, self.gsn_n))
def _select_feature_genes(self):
overlapping_genes = set(self.community_genes)
try:
self.rna_mad = pd.read_csv(self.load_result_dir + '/RNA_mad.tsv', sep='\t', index_col=0)
self.rna_mad.columns = [0]
except:
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
expressed_genes = ((self.rna >= 1).sum() > (self.rna.shape[0]) * self.exp_ratio_min)
self.rna_mad = self.rna.apply(mad)
self.rna_mad = pd.DataFrame(self.rna_mad, index=self.rna.columns)
self.rna_mad = self.rna_mad.loc[expressed_genes]
self.rna_mad = self.rna_mad.sort_values(by=0, ascending=False)
self.rna_mad.to_csv(os.path.join(self.result_path, 'RNA_mad.tsv'), sep='\t')
top_mad_genes = self.rna_mad.head(min(self.rna_top_n_std, self.rna_mad.shape[0])).index
self.output_pred_small += list(top_mad_genes)[0:20]
self.output_pred_small += list(top_mad_genes)[
int(self.rna_top_n_std / 2 - 10):int(self.rna_top_n_std / 2 + 10)]
self.output_pred_small += list(top_mad_genes)[-20:]
self.rna = self.rna[top_mad_genes]
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
logging.info("Total {} genes have top {} mad and gene group data".format(
len(overlapping_genes), self.rna.shape[1]))
def _filter_community(self):
com_to_drop = []
modeled_com_genes = set()
modeled_genes = set()
for data_type in self.data_types:
modeled_genes |= set(self.__dict__[data_type].columns)
for com, members in self.community_dict.items():
if self.use_all_dependency_gene:
self.community_dict[com] = sorted(
list((set(modeled_genes) & set(members)) | (set(members) & set(self.genes_in_label))))
else:
self.community_dict[com] = sorted(list(set(modeled_genes).intersection(members)))
if len(self.community_dict[com]) < self.community_affected_size_min:
com_to_drop.append(com)
elif len(self.community_dict[com]) > self.community_affected_size_max:
com_to_drop.append(com)
elif len(set(members) & set(self.genes_in_label)) < 1:
if self.require_label_gene_in_gene_group:
com_to_drop.append(com)
else:
modeled_com_genes |= set(self.community_dict[com])
else:
modeled_com_genes |= set(self.community_dict[com])
for com in com_to_drop:
self.community_dict.pop(com, None)
def _run_create_filter(self):
self.feature_genes = set()
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
self.community_filter = self.__create_filter(self.gene_community_dict, self.community_dict,
self.community_size_dict, random=False)
def __create_filter(self, gene_community_dict, community_dict, community_size_dict, random=False):
community_filter = ddict(set)
if not random:
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
i = 0
for g in self.genes_in_label:
coms = gene_community_dict[g]
coms = list(set(coms) & (community_dict.keys()))
com_size = [community_size_dict[x] for x in coms]
community_filter[g] |= set([g])
for s, com in sorted(zip(com_size, coms)):
genes = set(community_dict[com])
# Choose top n genes so that not too many features were used per gene group
if 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
if len(added_genes) == 0:
continue
if isinstance(self.feature_per_group_max, int):
choose_n = min(self.feature_per_group_max, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = np.ceil(len(genes) * self.feature_per_group_max)
choose_n = min(top_n, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
else:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
variable_genes = self.mut_freq.loc[list(added_genes)].sort_values(0, ascending=False)
if isinstance(self.feature_per_group_max, int):
top_genes = variable_genes.head(self.feature_per_group_max).index
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = np.ceil(len(genes) * self.feature_per_group_max)
top_genes = variable_genes.head(top_n).index
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
community_filter[g] |= set(top_genes)
if len(community_filter[g]) >= self.feature_max:
break
if not random:
if len(community_filter[g]) > 0:
self.genes_in_label_idx[g] = i
self.idx_genes_in_label[i] = g
i += 1
else:
logging.info("Gene {} could not find feature genes".format(g))
if not random:
logging.info(
"The dependency of total {} genes will be predicted".format(len(self.genes_in_label_idx.keys())))
return community_filter
def _build_hierarchy(self):
leaf_communities, df = self.load_leaf_communities()
child = leaf_communities
# The layer having only gene children
level = 1
self.community_level_dict = dict()
self.level_community_dict = dict()
count_dict = ddict(int)
for x in child:
self.community_level_dict[x] = level
count_dict[x] += 1
self.level_community_dict[level] = child
# logging.info("Layer {} has {} gene groups".format(level, len(child)))
while 1:
df_level = df.loc[df[1].isin(child)]
if df_level.shape[0] == 0:
break
level += 1
parent = sorted(list(set(df_level[0])))
for parent_group in parent:
self.community_level_dict[parent_group] = level
count_dict[parent_group] += 1
self.level_community_dict[level] = parent
child = parent
# Make the layer number of each community unique
self.level_community_dict = ddict(list)
for g, level in self.community_level_dict.items():
self.level_community_dict[level].append(g)
for level, groups in sorted(self.level_community_dict.items()):
logging.info("Layer {} has {} gene groups".format(level, len(groups)))
gene_groups_all = sorted(list(self.community_dict.keys())) + ['root']
logging.info(
"Total {} layers of {} gene groups in the hierarchy including the root".format(level, len(gene_groups_all)))
feature_genes_all = []
self.feature_n = []
np.random.RandomState(self.params['seeds'][0])
for data_type in self.data_types:
feat_n = len(self.__dict__[data_type].columns)
self.feature_n.append(feat_n)
# Randomly reselect features for each feature matrix
if 'full' in self.run_mode and self.use_all_feature_for_fully_net:
feat_pool = sorted(list(self.__dict__[data_type + '_all'].columns))
feature_genes_all += feat_pool
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, feat_pool]
logging.info(
"Use all {} genes from {} as features to form fully connected networks".format(feat_n, data_type))
elif 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
feat_pool = list(self.__dict__[data_type + '_all'].columns)
# Require gene labels in the features
pre_select = set(feat_pool) & set(self.genes_in_label)
feat_pool = sorted(list(set(feat_pool) - set(self.genes_in_label)))
random_feat = sorted(list(np.random.choice(feat_pool, feat_n - len(pre_select), replace=False)))
feature_genes_all += random_feat + list(pre_select)
feature_genes_all = sorted(feature_genes_all)
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, random_feat]
logging.info(
"Randomly select {} genes including {} gene of prediction from {} as features to form random gene groups".format(
feat_n, len(self.genes_in_label), data_type))
else:
feature_genes_all += sorted(list(self.__dict__[data_type].columns))
del_genes_all = sorted(list(self.genes_in_label_idx.keys()))
self.feature_n.append(len(del_genes_all))
self.genes_in_label = del_genes_all
self.save_label_genes(self.genes_in_label)
self.y = self.dependency[self.genes_in_label]
self.y_binary = ((self.y >= 0.5) + 0).astype(int)
# The order of indexed genes and gen groups:
if self.use_deletion_vector:
entity_all = feature_genes_all + del_genes_all + gene_groups_all
else:
entity_all = feature_genes_all + gene_groups_all
self.idx_name = {i: k for i, k in enumerate(entity_all)}
name_idx = ddict(list)
for k, v in self.idx_name.items():
name_idx[v].append(k)
if len(self.data_types) > 1:
self.mut_genes_idx = {}
self.rna_genes_idx = {}
for k, v in name_idx.items():
for idx in v:
if idx < self.feature_n[0]:
self.mut_genes_idx[k] = idx
elif self.feature_n[0] <= idx < self.feature_n[0] + self.feature_n[1]:
self.rna_genes_idx[k] = idx
self.feature_genes_idx = {x: min(name_idx[x]) for x in feature_genes_all}
self.del_genes_idx = {x: max(name_idx[x]) for x in del_genes_all}
self.gene_group_idx = {x: name_idx[x][0] for x in gene_groups_all}
self.community_hierarchy_dicts_all = {'idx_name': self.idx_name,
'feature_genes_idx': self.feature_genes_idx,
'del_genes_idx': self.del_genes_idx,
'gene_group_idx': self.gene_group_idx}
self.child_map_all = []
self.child_map_all_random = []
self.child_map_all_ones = []
feature_only_genes = set(feature_genes_all) - set(del_genes_all)
dep_only_genes = set(del_genes_all) - set(feature_genes_all)
feature_dep_both_genes = set(feature_genes_all) & set(del_genes_all)
gene_pool = sorted(list(set(feature_genes_all) | set(del_genes_all)))
self.community_filter_random = ddict(list)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
self.load_random_communities()
random_hierarchy = self.load_random_hierarchy()
else:
self.community_dict_random = {}
random_hierarchy = pd.DataFrame()
self.gene_community_dict_random = ddict(list)
self.community_size_dict_random = {}
prng = np.random.RandomState(self.params['seeds'][0])
logging.info("Building gene group hierarchy")
if self.run_mode == 'random':
idx_gene_pool = {i: g for i, g in enumerate(gene_pool)}
gene_pool_idx = {g: i for i, g in enumerate(gene_pool)}
partially_shuffled_membership = self.__partially_shuffle_gene_group(gene_pool, gene_pool_idx)
idx_gene_group = {i: g for g, i in self.gene_group_idx.items()}
partially_shuffled_relation = self.__partially_shuffle_gene_group_hierarchy(df, idx_gene_group)
else:
partially_shuffled_membership = None
partially_shuffled_relation = None
idx_gene_group = None
idx_gene_pool = None
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
genes = self.community_dict[group]
gene_idx = self._genes_to_feat_del_idx(genes)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
genes_random = self.community_dict_random[group]
else:
if partially_shuffled_membership is not None:
genes_random_idx = partially_shuffled_membership[idx - min_group_idx].nonzero()[0]
genes_random = sorted([idx_gene_pool[x] for x in genes_random_idx])
else:
if self.use_consistant_groups_for_labels:
gene_pool = sorted(list(set(gene_pool) - set(self.genes_in_label)))
pre_select = set(genes) & set(self.genes_in_label)
if len(set(genes) & set(self.genes_in_label)) > 0:
random_feat = list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False))
genes_random = sorted(random_feat + list(pre_select))
else:
genes_random = sorted(
list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False)))
else:
genes_random = sorted(list(prng.choice(gene_pool, len(genes), replace=False)))
self.community_dict_random[group] = genes_random
for g in genes_random:
self.gene_community_dict_random[g].append(group)
self.community_size_dict_random[group] = len(genes_random)
feat_genes = set(genes_random) & set(self.feature_genes_idx.keys())
del_genes = set(genes_random) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx_random = feat_gene_idx + del_gene_idx
else:
gene_idx = []
gene_idx_random = []
child = sorted(df.loc[df[0] == group, 1].tolist())
child_idx = sorted([self.gene_group_idx[x] for x in child if x in self.gene_group_idx])
self.child_map_all.append(sorted(gene_idx + child_idx))
if len(self.child_map_all[-1]) == 0:
logging.info("Gene group {} does not have children".format(group))
# Build random group hierarchy
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
child_random = sorted(random_hierarchy.loc[random_hierarchy[0] == group, 1].tolist())
child_idx_random = sorted([self.gene_group_idx[x] for x in child_random if x in self.gene_group_idx])
else:
if partially_shuffled_relation is not None:
child_idx_random = partially_shuffled_relation[idx - min_group_idx, :].nonzero()[0]
child_idx_random = [x + min_group_idx for x in child_idx_random]
child_random = sorted([idx_gene_group[x] for x in child_idx_random])
else:
child_idx_random = []
child_random = []
for c in child:
child_level = self.community_level_dict[c]
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
child_random.append(random_child)
random_c_idx = self.gene_group_idx[random_child]
child_idx_random.append(random_c_idx)
for rc in sorted(child_random):
random_hierarchy = pd.concat([random_hierarchy, pd.DataFrame([group, rc]).T], axis=0)
self.child_map_all_random.append(sorted(gene_idx_random + child_idx_random))
try:
assert len(gene_idx) == len(gene_idx_random), "Random gene number does not match"
except AssertionError:
pass
# Children for fully connected neural networks
if group in leaf_communities:
gene_idx_ones = list(self.feature_genes_idx.values())
else:
gene_idx_ones = []
parent_level = self.community_level_dict[group]
child_level = parent_level - 1
if child_level in self.level_community_dict:
child_ones = self.level_community_dict[child_level]
else:
child_ones = []
child_idx_ones = [self.gene_group_idx[x] for x in child_ones if x in self.gene_group_idx]
self.child_map_all_ones.append(sorted(gene_idx_ones + child_idx_ones))
self.save_communities(self.community_dict_random)
# Save random hierarchy as file
random_hierarchy.to_csv(os.path.join(self.result_path, 'random_group_hierarchy.tsv'),
index=None, sep='\t', header=None)
self.community_filter_random = self.__create_filter(self.gene_community_dict_random, self.community_dict_random,
self.community_size_dict_random, random=True)
self.community_filter_map = []
self.community_filter_map_random = []
feature_n = len(feature_genes_all)
for g in del_genes_all:
feat_genes = set(self.community_filter[g])
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
feat_gene_idx = sorted(feat_gene_idx)
else:
feat_gene_idx = sorted([self.feature_genes_idx[x] for x in feat_genes if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_gene_idx] = 1
self.community_filter_map.append(feat_genes_array)
feat_genes_random = set(self.community_filter_random[g])
if len(self.data_types) > 1:
feat_genes_random_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_genes_random_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_genes_random_idx.append(self.rna_genes_idx[g])
feat_genes_random_idx = sorted(feat_genes_random_idx)
else:
feat_genes_random_idx = sorted(
[self.feature_genes_idx[x] for x in feat_genes_random if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_genes_random_idx] = 1
self.community_filter_map_random.append(feat_genes_array)
def __partially_shuffle_gene_group(self, gene_pool, gene_pool_idx):
group_gene_membership_matrix = np.zeros([len(self.gene_group_idx), len(gene_pool)])
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
idx -= min_group_idx
genes = self.community_dict[group]
gene_idx = [gene_pool_idx[gene] for gene in genes]
group_gene_membership_matrix[idx, gene_idx] = 1
all_idx = group_gene_membership_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_permutation_ratio*100}% ({shuffled_number}) of gene membership was randomly shuffled")
# No shuffling
if self.random_group_permutation_ratio == 0:
return group_gene_membership_matrix
connections_to_shuffled = np.zeros([len(self.gene_group_idx), len(gene_pool)])
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_membership = np.zeros([len(self.gene_group_idx), len(gene_pool)])
for i in range(group_gene_membership_matrix.shape[0]):
original = group_gene_membership_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
pool = sorted(list(set(range(len(group_gene_membership_matrix[i]))) - set(keep)))
after_shuffled = list(prng.choice(pool, len(to_shuffled), replace=False))
partially_shuffled_membership[i][keep + after_shuffled] = 1
else:
partially_shuffled_membership[i][original] = 1
return partially_shuffled_membership
def __partially_shuffle_gene_group_hierarchy(self, df, idx_gene_group):
gene_group_relation_matrix = np.zeros([len(self.gene_group_idx), len(self.gene_group_idx)])
min_group_idx = min(self.gene_group_idx.values())
for _, row in df.iterrows():
parent = self.gene_group_idx[row[0]] - min_group_idx
child = self.gene_group_idx[row[1]] - min_group_idx
gene_group_relation_matrix[parent, child] = 1
all_idx = gene_group_relation_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_hierarchy_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_hierarchy_permutation_ratio*100}% ({shuffled_number}) of gene group hierarchy was randomly shuffled")
connections_to_shuffled = np.zeros(gene_group_relation_matrix.shape)
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_relation = np.zeros(gene_group_relation_matrix.shape)
# No shuffling
if self.random_group_hierarchy_permutation_ratio == 0:
return gene_group_relation_matrix
# Shuffle child group for each parent
for i in range(gene_group_relation_matrix.shape[0]):
original = gene_group_relation_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
children = [idx_gene_group[x + min_group_idx] for x in to_shuffled]
child_levels = [self.community_level_dict[child] for child in children]
after_shuffled = []
for child_level in child_levels:
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
random_child_idx = self.gene_group_idx[random_child] - min_group_idx
after_shuffled.append(random_child_idx)
after_shuffled = list(set(after_shuffled))
partially_shuffled_relation[i][keep + after_shuffled] = 1
else:
partially_shuffled_relation[i][original] = 1
return partially_shuffled_relation
def _genes_to_feat_del_idx(self, genes):
feat_genes = set(genes) & set(self.feature_genes_idx.keys())
del_genes = set(genes) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx = feat_gene_idx + del_gene_idx
return gene_idx
def _get_genes_in_child_group(self, group, genes_in_child_gene_group=set()):
_, df = self.load_leaf_communities()
children = df.loc[df[0] == group, 1].tolist()
for child in children:
if child in self.community_dict:
genes = self.community_dict[child]
genes_in_child_gene_group |= set(genes)
self._get_genes_in_child_group(child, genes_in_child_gene_group)
return genes_in_child_gene_group
def align_data(self):
self._subset_samples()
self._subset_target_genes()
self._select_feature_genes()
self._filter_community()
self._run_create_filter()
if len(self.data_types) > 1:
self.X = pd.concat([self.mut, self.rna], axis=1)
else:
self.X = self.__dict__[self.data_types[0]]
self.X_all = self.X
self._build_hierarchy()
# self._refine_community()
logging.info("Generating data splits for {} repeats and {} folds".format(self.repeat_n, self.fold_n))
self.split_data()
def split_data(self):
self.split_idx = dict()
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
if self.split_by_cancer_type and self.cancer_type == 'PANC':
cancer_type_id = ddict(list)
for x in self.X.index:
t = '_'.join(x.split('_')[1:])
cancer_type_id[t].append(x)
self.split_idx[repeat] = [ddict(list) for _ in range(self.fold_n)]
for j, (cancer_type, idx) in enumerate(cancer_type_id.items()):
logging.debug("{} has {} cell lines".format(cancer_type, len(idx)))
if len(idx) >= self.fold_n + 1:
logging.debug("{} has {} cell lines splitting".format(cancer_type, len(idx)))
split_subidx = self._split_data(self.X.loc[idx], self.y.loc[idx], seed)
for fold, split_dict in enumerate(split_subidx):
for split_type in split_dict.keys():
self.split_idx[repeat][fold][split_type] += list(split_dict[split_type])
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
target_idx = set(self.dependency_target.index) & set(self.rna_target_all.index)
target_idx_only = target_idx - set(self.dependency.index)
target_idx_only = sorted(list(target_idx_only))
for fold in range(len(self.split_idx[repeat])):
self.split_idx[repeat][fold]['test'] = target_idx_only
self.X_all = pd.concat([self.X_all, self.rna_target.loc[target_idx_only, self.X_all.columns]])
self.y = pd.concat([self.y, self.dependency_target.loc[target_idx_only, self.y.columns]])
y_binary_target = ((self.y.loc[target_idx_only] >= 0.5) + 0).astype(int)
self.y_binary = pd.concat([self.y_binary, y_binary_target])
else:
self.split_idx[repeat] = self._split_data(self.X, self.y, seed)
def _split_data(self, X, y, seed):
kf1 = KFold(n_splits=self.fold_n, random_state=seed)
split_idx = []
for fold, (train_index, test_index) in enumerate(kf1.split(X, y)):
split_dict = dict()
split_dict['test'] = list(X.index[test_index])
# Generate validation data by splitting part of training data
X_train, y_train = X.loc[X.index[train_index]], y.loc[X.index[train_index]]
if X_train.shape[0] < self.fold_n:
return []
kf = KFold(n_splits=self.fold_n, random_state=seed)
for fold_2, (train_index, test_index) in enumerate(kf.split(X_train, y_train)):
split_dict['train'] = list(X_train.index[train_index])
split_dict['val'] = list(X_train.index[test_index])
if fold_2 == fold: # Taking the different splits to differentiate it
break
split_idx.append(split_dict)
return split_idx
def get_split_data(self, i, j):
self.idx['train'] = self.split_idx[i][j]['train']
self.idx['val'] = self.split_idx[i][j]['val']
self.idx['test'] = self.split_idx[i][j]['test']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
self.X_test, self.y_test = self.X_all.loc[self.idx['test']].values, y.loc[self.idx['test']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
logging.info("Test data shape X: {}, y: {}".format(self.X_test.shape, self.y_test.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_test, axis=1))))
def perform(self, model_name, params=None):
if params is None:
params = self.params
save_params(self.result_path, params)
if self.cv_fold != 0:
if 'models' in params and 'random_forest' in self.run_mode:
self.perform_cv('random_forest', params)
else:
self.perform_cv(model_name, params)
else:
self.prepare_data()
# self.community_filter_ones = np.ones(self.community_filter.shape)
model_name_base = model_name
for repeat in range(self.repeat_n):
params['seed'] = params['seeds'][repeat]
# self.community_matrix_random = lil_matrix(self.community_matrix.shape)
np.random.seed(params['seed'])
if 'clh_v' in self.model_v:
mask = self.child_map_all
mask_random = self.child_map_all_random
mask_ones = self.child_map_all_ones
else:
mask = self.community_hierarchy
mask_random = self.community_hierarchy_random
mask_ones = self.community_hierarchy_ones
for fold in range(len(self.split_idx[repeat])):
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + 'fold' + str(fold)
self.get_split_data(repeat, fold)
self.calculate_weights()
self.normalize_data()
if 'ref' in self.run_mode:
self.run_exp(model_name_base, model_suffix,
params, mask, repeat, fold, self.community_filter_map)
elif 'random_forest' in self.run_mode.lower():
self.run_exp('random_forest', model_suffix,
params, mask, repeat, fold, None, mask_ones)
elif 'random_predictor' in self.run_mode:
self.run_exp('random_predictor', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'random' in self.run_mode:
self.run_exp('random_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'expression_control' in self.run_mode:
self.run_exp('expression_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'full' in self.run_mode:
self.run_exp('gene_control', model_suffix,
params, mask, repeat, fold, None, mask_ones)
def calculate_weights(self):
if self.use_class_weights:
gsp_n = (self.y_train >= 0.5).sum().sum()
gsn_n = (self.y_train < 0.5).sum().sum()
if self.use_normalized_class_weights:
self.class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.class_weight_neg = gsp_n / gsn_n
self.class_weight_pos = 1
else:
self.class_weight_neg = None
self.class_weight_pos = None
if self.use_sample_class_weights:
gsp_n = (self.y_train >= 0.5).sum(axis=0)
gsn_n = (self.y_train < 0.5).sum(axis=0)
if self.use_normalized_sample_class_weights:
self.sample_class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.sample_class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.sample_class_weight_neg = gsp_n / gsn_n
self.sample_class_weight_pos = np.array([1] * len(gsn_n))
else:
self.sample_class_weight_neg = None
self.sample_class_weight_pos = None
def split_data_cv(self):
self.split_idx_cv = ddict(list)
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
kf1 = KFold(n_splits=self.cv_fold, random_state=seed)
idx = sorted(list(self.idx['train']) + list(self.idx['val']))
X_train_val = self.X_all.loc[idx]
y_train_val = self.y.loc[idx]
for train_index, val_index in kf1.split(X_train_val, y_train_val):
split_dict = {}
split_dict['train'] = X_train_val.index[train_index]
split_dict['val'] = X_train_val.index[val_index]
self.split_idx_cv[repeat].append(split_dict)
def get_split_data_cv(self, i, j):
self.idx['train'] = self.split_idx_cv[i][j]['train']
self.idx['val'] = self.split_idx_cv[i][j]['val']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, cv_fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
def _normalize_rna(self, X_train, X_val, X_test):
# scaler = MinMaxScaler()
# self.X_train = scaler.fit_transform(self.X_train)
# self.X_val = scaler.transform(self.X_val)
# self.X_test = scaler.transform(self.X_test)
# self.X_train = np.log2(self.X_train + 1)
# self.X_val = np.log2(self.X_val + 1)
# self.X_test = np.log2(self.X_test + 1)
if self.use_StandardScaler:
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# feature_no_info = ((self.X_train.sum(axis=0) == 0) + 0).nonzero()[0]
X_val = scaler.transform(X_val)
# self.X_val[self.X_val > self.X_train.max()] = self.X_train.max()
# self.X_val[:, feature_no_info] = 0
X_test = scaler.transform(X_test)
if self.use_sigmoid_feature:
X_train = 1 / (1 + np.exp(-X_train))
X_val = 1 / (1 + np.exp(-X_val))
X_test = 1 / (1 + np.exp(-X_test))
if self.use_tanh_feature:
X_train = np.tanh(X_train)
X_val = np.tanh(X_val)
X_test = np.tanh(X_test)
if self.use_MinMaxScaler:
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
if self.clip_Xval_Xtest is not None:
logging.info("Before cliping,\n"
"Val data (min,max) = ({}, {})\n"
"Test data (min,max) = ({}, {})".format(
X_val.min(),
X_val.max(),
X_test.min(),
X_test.max(),
))
X_val = np.clip(X_val, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
X_test = np.clip(X_test, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
return X_train, X_val, X_test
def normalize_data(self):
self.X_train = np.nan_to_num(self.X_train)
self.X_val = np.nan_to_num(self.X_val)
self.X_test = np.nan_to_num(self.X_test)
self.X_train, self.X_val, self.X_test = self._normalize_rna(self.X_train, self.X_val, self.X_test)
def run_exp(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None):
logging.info("Running {} repeat {} fold {}".format(model_name, repeat, fold))
output_prefix = model_name + model_suffix
if 'random_predictor' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'mean_control' in model_name:
# self.compute_metric(cm, 'train', model_name, model_suffix, self.y_train, self.y_train, com_mat, repeat,
# self.y_train2)
# self.compute_metric(cm, 'val', model_name, model_suffix, self.y_train, self.y_val, com_mat, repeat,
# self.y_val2)
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'expression_control' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
elif 'random_forest' in model_name:
sk_all = []
params['n_jobs'] = -1
for i in range(self.y_train.shape[1]):
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train[:, i])
sk_all.append(sk)
self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self.compute_metric(sk_all, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
if hasattr(self, 'load_result_dir'):
load_ckpt = os.path.join(self.load_result_dir,
'{}_{}_{}.tar'.format(model_name + model_suffix, self.model_v,
params['seed']))
cm.train(self.X_train, com_mat, self.y_train, load_weight_dir=load_ckpt, mask_fully=com_mat_fully)
else:
y_val_index = self.idx['val']
y_col = self.y.columns
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train, mask_fully=com_mat_fully)
# self.analyze_weights(cm, model_name, model_suffix)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
self._clear_gpu(model_name, model_suffix)
model_suffix = str(params['seed']) + 'repeat' + str(repeat)
self.compute_metric_all_test('test', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
def run_exp_cv(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None, grid_name=None):
logging.info("Running {}".format(model_suffix))
if 'random_forest' in self.run_mode:
embed()
sys.exit(0)
params['n_jobs'] = -1
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train)
self.compute_metric(sk, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
self.compute_metric(sk, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
# sk_all = []
# for i in range(self.y_train.shape[1]):
# sk = SklearnModel(model_name + model_suffix, params)
# sk.train(self.X_train, self.y_train[:, i])
# sk_all.append(sk)
#
# self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
# self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
y_val_index = self.idx['val']
y_col = self.y.columns
output_prefix = model_name + model_suffix
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
if not self.save_model_ckpt:
cm._rm_ckpt()
self.output_metric()
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + '_' + grid_name
self.compute_metric_all_test('val', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
metric_output = {}
for x in self.metric_output:
if self.metric_output[x].shape[0] > 0:
df = self.metric_output[x].copy()
df = df.loc[['fold' not in y for y in df.index]]
if df.shape[0] > 0:
grid_df = self.grid_df.copy().T
grid_df.index = df.index
metric_output[x] = pd.concat([df, grid_df], axis=1)
self.output_metric(metric_output, '_all')
def perform_cv(self, model_name, params):
grid = ParameterGrid(params['grid_search'])
params_backbone = params.copy()
self.grid_df = pd.DataFrame()
logging.info("{} points are searching in grid".format(len(grid)))
for i, param_grid in enumerate(grid):
self.grid_df = pd.concat([self.grid_df, | pd.Series(param_grid, name=i) | pandas.Series |
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import tempfile
import logging
from glob import glob
from pyarrow import ArrowIOError
from pyarrow.lib import ArrowInvalid
import time
import random
from pandas.api.types import is_numeric_dtype
from polyfun_utils import Logger, check_package_versions, set_snpid_index, configure_logger
from scipy.optimize import nnls
def splash_screen():
print('*********************************************************************')
print('* PolyPred (POLYgenic risk PREDiction)')
print('* Version 1.0.0')
print('* (C) 2020-2021 <NAME>')
print('*********************************************************************')
print()
def create_plink_range_file(df_betas, temp_dir, num_jk=200):
#make sure that the df_betas is ordered
is_chr_ordered = np.all(np.diff(df_betas['CHR']) >= 0)
is_bp_ordered = True
for chr_num, df_chr in df_betas.groupby('CHR'):
is_bp_ordered = is_bp_ordered & (np.all(np.diff(df_chr['BP']) >= 0))
if not is_bp_ordered: break
if not is_bp_ordered or not is_chr_ordered:
df_betas.sort_values(['CHR', 'BP'], inplace=True)
#bound num_jk by num_snps if we have very few SNPs
num_jk = np.minimum(num_jk, df_betas.shape[0])
#create df_ranges
ranges_file = os.path.join(temp_dir, 'ranges.txt')
ranges_list = [{'block':'block%d'%(block), 'lb':str(block), 'ub':str(block+1)} for block in range(1,num_jk+1)]
df_ranges = | pd.DataFrame(ranges_list) | pandas.DataFrame |
import pandas as pd
from classification import (
Hierarchy,
repeated_table_to_parent_id_table,
parent_code_table_to_parent_id_table,
Classification,
)
if __name__ == "__main__":
df = pd.read_csv(
"in/cat_localidad_MAY2015.csv.gz", encoding="ISO-8859-2", compression="gzip"
)
df.columns = [
"state_code",
"state_name",
"state_name_short",
"municipality_code",
"municipality_name",
"locality_code",
"locality_name",
"latitude",
"longitude",
"altitude",
"map_code",
"ambito",
"population_total",
"population_male",
"population_female",
"dwellings_occupied",
]
df = df[
[
"state_code",
"state_name",
"municipality_code",
"municipality_name",
"locality_code",
"locality_name",
]
]
df.state_code = df.state_code.astype(str).str.zfill(2)
df.municipality_code = df.municipality_code.astype(str).str.zfill(3)
df.locality_code = df.locality_code.astype(str).str.zfill(4)
df.municipality_code = df.state_code + df.municipality_code
df.locality_code = df.municipality_code + df.locality_code
df.state_name = df.state_name.str.title()
df.municipality_name = df.municipality_name.str.title()
df.locality_name = df.locality_name.str.title()
df = df.rename(
columns={
"state_name": "name_en_state",
"municipality_name": "name_en_municipality",
"locality_name": "name_en_locality",
}
)
h = Hierarchy(["state", "municipality", "locality"])
parent_code_table = repeated_table_to_parent_id_table(
df,
h,
level_fields={
"state": ["name_en_state"],
"municipality": ["name_en_municipality"],
"locality": ["name_en_locality"],
},
)
# TODO: This isn't the official classification level name but this makes
# compatibility between colombia and mexico way easier
parent_code_table.loc[parent_code_table.level == "state", "level"] = "department"
# Drop the "locality" level since we don't use it
parent_code_table = parent_code_table[parent_code_table.level != "locality"]
# This adds a highest level element that represents the whole country
mex = pd.Series({"code": "MEX", "name_en": "Mexico", "level": "country"})
parent_code_table.loc[
parent_code_table.level == "department", "parent_code"
] = "MEX"
parent_code_table = pd.concat([ | pd.DataFrame(mex) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""reV-to-SAM generation interface module.
Wraps the NREL-PySAM pvwattsv5, windpower, and tcsmolensalt modules with
additional reV features.
"""
from abc import ABC, abstractmethod
import copy
import os
import logging
import numpy as np
import pandas as pd
from warnings import warn
import PySAM.Pvwattsv5 as PySamPv5
import PySAM.Pvwattsv7 as PySamPv7
import PySAM.Pvsamv1 as PySamDetailedPv
import PySAM.Windpower as PySamWindPower
import PySAM.TcsmoltenSalt as PySamCSP
import PySAM.Swh as PySamSwh
import PySAM.TroughPhysicalProcessHeat as PySamTpph
import PySAM.LinearFresnelDsgIph as PySamLds
import PySAM.MhkWave as PySamMhkWave
from reV.SAM.defaults import (DefaultPvWattsv5,
DefaultPvWattsv7,
DefaultPvSamv1,
DefaultWindPower,
DefaultTcsMoltenSalt,
DefaultSwh,
DefaultTroughPhysicalProcessHeat,
DefaultLinearFresnelDsgIph,
DefaultMhkWave)
from reV.utilities.exceptions import (SAMInputWarning, SAMExecutionError,
InputError)
from reV.utilities.curtailment import curtail
from reV.SAM.SAM import RevPySam
from reV.SAM.econ import LCOE, SingleOwner
logger = logging.getLogger(__name__)
class AbstractSamGeneration(RevPySam, ABC):
"""Base class for SAM generation simulations."""
def __init__(self, resource, meta, sam_sys_inputs, site_sys_inputs=None,
output_request=None, drop_leap=False):
"""Initialize a SAM generation object.
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
meta : pd.DataFrame | pd.Series
Meta data corresponding to the resource input for the single
location. Should include values for latitude, longitude, elevation,
and timezone.
sam_sys_inputs : dict
Site-agnostic SAM system model inputs arguments.
site_sys_inputs : dict
Optional set of site-specific SAM system inputs to complement the
site-agnostic inputs.
output_request : list
Requested SAM outputs (e.g., 'cf_mean', 'annual_energy',
'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price',
'lcoe_fcr').
drop_leap : bool
Drops February 29th from the resource data. If False, December
31st is dropped from leap years.
"""
# drop the leap day
if drop_leap:
resource = self.drop_leap(resource)
# make sure timezone and elevation are in the meta data
meta = self.tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
# don't pass resource to base class,
# set in concrete generation classes instead
super().__init__(meta, sam_sys_inputs, output_request,
site_sys_inputs=site_sys_inputs)
# Set the site number using resource
if hasattr(resource, 'name'):
self._site = resource.name
else:
self._site = None
self.check_resource_data(resource)
self.set_resource_data(resource, meta)
@classmethod
def _get_res(cls, res_df, output_request):
"""Get the resource arrays and pass through for output (single site).
Parameters
----------
res_df : pd.DataFrame
2D table with resource data.
output_request : list
Outputs to retrieve from SAM.
Returns
-------
res_mean : dict | None
Dictionary object with variables for resource arrays.
out_req_cleaned : list
Output request list with the resource request entries removed.
"""
out_req_cleaned = copy.deepcopy(output_request)
res_out = None
res_reqs = []
ti = res_df.index
for req in out_req_cleaned:
if req in res_df:
res_reqs.append(req)
if res_out is None:
res_out = {}
res_out[req] = cls.ensure_res_len(res_df[req].values, ti)
for req in res_reqs:
out_req_cleaned.remove(req)
return res_out, out_req_cleaned
@staticmethod
def _get_res_mean(resource, res_gid, output_request):
"""Get the resource annual means (single site).
Parameters
----------
resource : rex.sam_resource.SAMResource
SAM resource object for WIND resource
res_gid : int
Site to extract means for
output_request : list
Outputs to retrieve from SAM.
Returns
-------
res_mean : dict | None
Dictionary object with variables for resource means.
out_req_nomeans : list
Output request list with the resource mean entries removed.
"""
out_req_nomeans = copy.deepcopy(output_request)
res_mean = None
idx = resource.sites.index(res_gid)
irrad_means = ('dni_mean', 'dhi_mean', 'ghi_mean')
if 'ws_mean' in out_req_nomeans:
out_req_nomeans.remove('ws_mean')
res_mean = {}
res_mean['ws_mean'] = resource['mean_windspeed', idx]
else:
for var in resource.var_list:
label_1 = '{}_mean'.format(var)
label_2 = 'mean_{}'.format(var)
if label_1 in out_req_nomeans:
out_req_nomeans.remove(label_1)
if res_mean is None:
res_mean = {}
res_mean[label_1] = resource[label_2, idx]
if label_1 in irrad_means:
# convert to kWh/m2/day
res_mean[label_1] /= 1000
res_mean[label_1] *= 24
return res_mean, out_req_nomeans
def check_resource_data(self, resource):
"""Check resource dataframe for NaN values
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
"""
if pd.isna(resource).any().any():
bad_vars = pd.isna(resource).any(axis=0)
bad_vars = resource.columns[bad_vars].values.tolist()
msg = ('Found NaN values for site {} in variables {}'
.format(self.site, bad_vars))
logger.error(msg)
raise InputError(msg)
@abstractmethod
def set_resource_data(self, resource, meta):
"""Placeholder for resource data setting (nsrdb or wtk)"""
@staticmethod
def tz_elev_check(sam_sys_inputs, site_sys_inputs, meta):
"""Check timezone+elevation input and use json config
timezone+elevation if not in resource meta.
Parameters
----------
sam_sys_inputs : dict
Site-agnostic SAM system model inputs arguments.
site_sys_inputs : dict
Optional set of site-specific SAM system inputs to complement the
site-agnostic inputs.
meta : pd.DataFrame
1D table with resource meta data.
Returns
-------
meta : pd.DataFrame
1D table with resource meta data. Will include "timezone"
and "elevation" from the sam and site system inputs if found.
"""
if meta is not None:
if sam_sys_inputs is not None:
if 'elevation' in sam_sys_inputs:
meta['elevation'] = sam_sys_inputs['elevation']
if 'timezone' in sam_sys_inputs:
meta['timezone'] = int(sam_sys_inputs['timezone'])
# site-specific inputs take priority over generic system inputs
if site_sys_inputs is not None:
if 'elevation' in site_sys_inputs:
meta['elevation'] = site_sys_inputs['elevation']
if 'timezone' in site_sys_inputs:
meta['timezone'] = int(site_sys_inputs['timezone'])
if 'timezone' not in meta:
msg = ('Need timezone input to run SAM gen. Not found in '
'resource meta or technology json input config.')
raise SAMExecutionError(msg)
return meta
@property
def has_timezone(self):
""" Returns true if instance has a timezone set """
if self._meta is not None:
if 'timezone' in self.meta:
return True
return False
def cf_mean(self):
"""Get mean capacity factor (fractional) from SAM.
Returns
-------
output : float
Mean capacity factor (fractional).
"""
return self['capacity_factor'] / 100
def cf_profile(self):
"""Get hourly capacity factor (frac) profile in orig timezone.
Returns
-------
cf_profile : np.ndarray
1D numpy array of capacity factor profile.
Datatype is float32 and array length is 8760*time_interval.
"""
return self.gen_profile() / self.sam_sys_inputs['system_capacity']
def annual_energy(self):
"""Get annual energy generation value in kWh from SAM.
Returns
-------
output : float
Annual energy generation (kWh).
"""
return self['annual_energy']
def energy_yield(self):
"""Get annual energy yield value in kwh/kw from SAM.
Returns
-------
output : float
Annual energy yield (kwh/kw).
"""
return self['kwh_per_kw']
def gen_profile(self):
"""Get power generation profile (orig timezone) in kW.
Returns
-------
output : np.ndarray
1D array of hourly power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return np.array(self['gen'], dtype=np.float32)
def collect_outputs(self, output_lookup=None):
"""
Collect SAM gen output_request. Rolls outputs to UTC if appropriate.
Parameters
----------
output_lookup : dict | None
Lookup dictionary mapping output keys to special output methods.
None defaults to generation default outputs.
"""
if output_lookup is None:
output_lookup = {'cf_mean': self.cf_mean,
'cf_profile': self.cf_profile,
'annual_energy': self.annual_energy,
'energy_yield': self.energy_yield,
'gen_profile': self.gen_profile,
}
super().collect_outputs(output_lookup=output_lookup)
def run_gen_and_econ(self):
"""Run SAM generation with possibility for follow on econ analysis."""
lcoe_out_reqs = None
so_out_reqs = None
lcoe_vars = ('lcoe_fcr', 'fixed_charge_rate', 'capital_cost',
'fixed_operating_cost', 'variable_operating_cost')
so_vars = ('ppa_price', 'lcoe_real', 'lcoe_nom',
'project_return_aftertax_npv', 'flip_actual_irr',
'gross_revenue')
if 'lcoe_fcr' in self.output_request:
lcoe_out_reqs = [r for r in self.output_request if r in lcoe_vars]
self.output_request = [r for r in self.output_request
if r not in lcoe_out_reqs]
elif any(x in self.output_request for x in so_vars):
so_out_reqs = [r for r in self.output_request if r in so_vars]
self.output_request = [r for r in self.output_request
if r not in so_out_reqs]
# Execute the SAM generation compute module (pvwattsv7, windpower, etc)
self.run()
# Execute a follow-on SAM econ compute module
# (lcoe_fcr, singleowner, etc)
if lcoe_out_reqs is not None:
self.sam_sys_inputs['annual_energy'] = self.annual_energy()
lcoe = LCOE(self.sam_sys_inputs, output_request=lcoe_out_reqs)
lcoe.assign_inputs()
lcoe.execute()
lcoe.collect_outputs()
lcoe.outputs_to_utc_arr()
self.outputs.update(lcoe.outputs)
elif so_out_reqs is not None:
self.sam_sys_inputs['gen'] = self.gen_profile()
so = SingleOwner(self.sam_sys_inputs, output_request=so_out_reqs)
so.assign_inputs()
so.execute()
so.collect_outputs()
so.outputs_to_utc_arr()
self.outputs.update(so.outputs)
def run(self):
"""Run a reV-SAM generation object by assigning inputs, executing the
SAM simulation, collecting outputs, and converting all arrays to UTC.
"""
self.assign_inputs()
self.execute()
self.collect_outputs()
self.outputs_to_utc_arr()
@classmethod
def reV_run(cls, points_control, res_file, site_df,
output_request=('cf_mean',), drop_leap=False,
gid_map=None):
"""Execute SAM generation based on a reV points control instance.
Parameters
----------
points_control : config.PointsControl
PointsControl instance containing project points site and SAM
config info.
res_file : str
Resource file with full path.
site_df : pd.DataFrame
Dataframe of site-specific input variables. Row index corresponds
to site number/gid (via df.loc not df.iloc), column labels are the
variable keys that will be passed forward as SAM parameters.
output_request : list | tuple
Outputs to retrieve from SAM.
drop_leap : bool
Drops February 29th from the resource data. If False, December
31st is dropped from leap years.
gid_map : None | dict
Mapping of unique integer generation gids (keys) to single integer
resource gids (values). This enables the user to input unique
generation gids in the project points that map to non-unique
resource gids. This can be None or a pre-extracted dict.
Returns
-------
out : dict
Nested dictionaries where the top level key is the site index,
the second level key is the variable name, second level value is
the output variable value.
"""
# initialize output dictionary
out = {}
# Get the RevPySam resource object
resources = RevPySam.get_sam_res(res_file,
points_control.project_points,
points_control.project_points.tech,
output_request=output_request,
gid_map=gid_map)
# run resource through curtailment filter if applicable
curtailment = points_control.project_points.curtailment
if curtailment is not None:
resources = curtail(resources, curtailment,
random_seed=curtailment.random_seed)
# iterate through project_points gen_gid values
for gen_gid in points_control.project_points.sites:
# Lookup the resource gid if there's a mapping and get the resource
# data from the SAMResource object using the res_gid.
res_gid = gen_gid if gid_map is None else gid_map[gen_gid]
site_res_df, site_meta = resources._get_res_df(res_gid)
# drop the leap day
if drop_leap:
site_res_df = cls.drop_leap(site_res_df)
_, inputs = points_control.project_points[gen_gid]
# get resource data pass-throughs and resource means
res_outs, out_req_cleaned = cls._get_res(site_res_df,
output_request)
res_mean, out_req_cleaned = cls._get_res_mean(resources, res_gid,
out_req_cleaned)
# iterate through requested sites.
sim = cls(resource=site_res_df, meta=site_meta,
sam_sys_inputs=inputs, output_request=out_req_cleaned,
site_sys_inputs=dict(site_df.loc[gen_gid, :]))
sim.run_gen_and_econ()
# collect outputs to dictout
out[gen_gid] = sim.outputs
if res_outs is not None:
out[gen_gid].update(res_outs)
if res_mean is not None:
out[gen_gid].update(res_mean)
return out
class AbstractSamSolar(AbstractSamGeneration, ABC):
"""Base Class for Solar generation from SAM"""
def set_resource_data(self, resource, meta):
"""Set NSRDB resource data arrays.
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
meta : pd.DataFrame | pd.Series
Meta data corresponding to the resource input for the single
location. Should include values for latitude, longitude, elevation,
and timezone.
"""
time_index = resource.index
self.time_interval = self.get_time_interval(resource.index.values)
# map resource data names to SAM required data names
var_map = {'dni': 'dn',
'dhi': 'df',
'ghi': 'gh',
'clearskydni': 'dn',
'clearskydhi': 'df',
'clearskyghi': 'gh',
'windspeed': 'wspd',
'airtemperature': 'tdry',
'temperature': 'tdry',
'temp': 'tdry',
'dewpoint': 'tdew',
'surfacepressure': 'pres',
'pressure': 'pres',
'surfacealbedo': 'albedo',
}
lower_case = {k: k.lower().replace(' ', '').replace('_', '')
for k in resource.columns}
irrad_vars = ['dn', 'df', 'gh']
resource = resource.rename(mapper=lower_case, axis='columns')
resource = resource.rename(mapper=var_map, axis='columns')
time_index = resource.index
resource = {k: np.array(v) for (k, v) in
resource.to_dict(orient='list').items()}
# set resource variables
for var, arr in resource.items():
if var != 'time_index':
# ensure that resource array length is multiple of 8760
arr = np.roll(
self.ensure_res_len(arr, time_index),
int(self._meta['timezone'] * self.time_interval))
if var in irrad_vars:
if np.min(arr) < 0:
warn('Solar irradiance variable "{}" has a minimum '
'value of {}. Truncating to zero.'
.format(var, np.min(arr)), SAMInputWarning)
arr = np.where(arr < 0, 0, arr)
resource[var] = arr.tolist()
resource['lat'] = meta['latitude']
resource['lon'] = meta['longitude']
resource['tz'] = meta['timezone']
if 'elevation' in meta:
resource['elev'] = meta['elevation']
else:
resource['elev'] = 0.0
time_index = self.ensure_res_len(time_index, time_index)
resource['minute'] = time_index.minute
resource['hour'] = time_index.hour
resource['year'] = time_index.year
resource['month'] = time_index.month
resource['day'] = time_index.day
if 'albedo' in resource:
self['albedo'] = resource.pop('albedo')
self['solar_resource_data'] = resource
class AbstractSamPv(AbstractSamSolar, ABC):
"""Photovoltaic (PV) generation with either pvwatts of detailed pv.
"""
# set these class attrs in concrete subclasses
MODULE = None
PYSAM = None
def __init__(self, resource, meta, sam_sys_inputs, site_sys_inputs=None,
output_request=None, drop_leap=False):
"""Initialize a SAM solar object.
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
meta : pd.DataFrame | pd.Series
Meta data corresponding to the resource input for the single
location. Should include values for latitude, longitude, elevation,
and timezone.
sam_sys_inputs : dict
Site-agnostic SAM system model inputs arguments.
site_sys_inputs : dict
Optional set of site-specific SAM system inputs to complement the
site-agnostic inputs.
output_request : list
Requested SAM outputs (e.g., 'cf_mean', 'annual_energy',
'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price',
'lcoe_fcr').
drop_leap : bool
Drops February 29th from the resource data. If False, December
31st is dropped from leap years.
"""
# need to check tilt=lat and azimuth for pv systems
sam_sys_inputs = self.set_latitude_tilt_az(sam_sys_inputs, meta)
super().__init__(resource, meta, sam_sys_inputs,
site_sys_inputs=site_sys_inputs,
output_request=output_request,
drop_leap=drop_leap)
@staticmethod
def set_latitude_tilt_az(sam_sys_inputs, meta):
"""Check if tilt is specified as latitude and set tilt=lat, az=180 or 0
Parameters
----------
sam_sys_inputs : dict
Site-agnostic SAM system model inputs arguments.
meta : pd.DataFrame
1D table with resource meta data.
Returns
-------
sam_sys_inputs : dict
Site-agnostic SAM system model inputs arguments.
If for a pv simulation the "tilt" parameter was originally not
present or set to 'lat' or 'latitude', the tilt will be set to
the absolute value of the latitude found in meta and the azimuth
will be 180 if lat>0, 0 if lat<0.
"""
set_tilt = False
if sam_sys_inputs is not None and meta is not None:
if 'tilt' not in sam_sys_inputs:
warn('No tilt specified, setting at latitude.',
SAMInputWarning)
set_tilt = True
else:
if (sam_sys_inputs['tilt'] == 'lat'
or sam_sys_inputs['tilt'] == 'latitude'):
set_tilt = True
if set_tilt:
# set tilt to abs(latitude)
sam_sys_inputs['tilt'] = np.abs(meta['latitude'])
if meta['latitude'] > 0:
# above the equator, az = 180
sam_sys_inputs['azimuth'] = 180
else:
# below the equator, az = 0
sam_sys_inputs['azimuth'] = 0
logger.debug('Tilt specified at "latitude", setting tilt to: {}, '
'azimuth to: {}'
.format(sam_sys_inputs['tilt'],
sam_sys_inputs['azimuth']))
return sam_sys_inputs
def cf_mean(self):
"""Get mean capacity factor (fractional) from SAM.
NOTE: PV capacity factor is the AC power production / the DC nameplate
Returns
-------
output : float
Mean capacity factor (fractional).
PV CF is calculated as AC power / DC nameplate.
"""
return self['capacity_factor'] / 100
def cf_profile(self):
"""Get hourly capacity factor (frac) profile in orig timezone.
NOTE: PV capacity factor is the AC power production / the DC nameplate
Returns
-------
cf_profile : np.ndarray
1D numpy array of capacity factor profile.
Datatype is float32 and array length is 8760*time_interval.
PV CF is calculated as AC power / DC nameplate.
"""
return self.gen_profile() / self.sam_sys_inputs['system_capacity']
def gen_profile(self):
"""Get AC inverter power generation profile (orig timezone) in kW.
This is an alias of the "ac" SAM output variable.
Returns
-------
output : np.ndarray
1D array of AC inverter power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return self.ac()
def ac(self):
"""Get AC inverter power generation profile (orig timezone) in kW.
Returns
-------
output : np.ndarray
1D array of AC inverter power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return np.array(self['ac'], dtype=np.float32) / 1000
def dc(self):
"""
Get DC array power generation profile (orig timezone) in kW.
Returns
-------
output : np.ndarray
1D array of DC array power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return np.array(self['dc'], dtype=np.float32) / 1000
def clipped_power(self):
"""
Get the clipped DC power generated behind the inverter
(orig timezone) in kW.
Returns
-------
clipped : np.ndarray
1D array of clipped DC power in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
ac = self.ac()
dc = self.dc()
return np.where(ac < ac.max(), 0, dc - ac)
@staticmethod
@abstractmethod
def default():
"""Get the executed default pysam object."""
def collect_outputs(self, output_lookup=None):
"""Collect SAM gen output_request.
Parameters
----------
output_lookup : dict | None
Lookup dictionary mapping output keys to special output methods.
None defaults to generation default outputs.
"""
if output_lookup is None:
output_lookup = {'cf_mean': self.cf_mean,
'cf_profile': self.cf_profile,
'annual_energy': self.annual_energy,
'energy_yield': self.energy_yield,
'gen_profile': self.gen_profile,
'ac': self.ac,
'dc': self.dc,
'clipped_power': self.clipped_power
}
super().collect_outputs(output_lookup=output_lookup)
class PvWattsv5(AbstractSamPv):
"""Photovoltaic (PV) generation with pvwattsv5.
"""
MODULE = 'pvwattsv5'
PYSAM = PySamPv5
@staticmethod
def default():
"""Get the executed default pysam PVWATTSV5 object.
Returns
-------
PySAM.Pvwattsv5
"""
return DefaultPvWattsv5.default()
class PvWattsv7(AbstractSamPv):
"""Photovoltaic (PV) generation with pvwattsv7.
"""
MODULE = 'pvwattsv7'
PYSAM = PySamPv7
@staticmethod
def default():
"""Get the executed default pysam PVWATTSV7 object.
Returns
-------
PySAM.Pvwattsv7
"""
return DefaultPvWattsv7.default()
class PvSamv1(AbstractSamPv):
"""Detailed PV model"""
MODULE = 'Pvsamv1'
PYSAM = PySamDetailedPv
def ac(self):
"""Get AC inverter power generation profile (orig timezone) in kW.
Returns
-------
output : np.ndarray
1D array of AC inverter power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return np.array(self['gen'], dtype=np.float32)
def dc(self):
"""
Get DC array power generation profile (orig timezone) in kW.
Returns
-------
output : np.ndarray
1D array of DC array power generation in kW.
Datatype is float32 and array length is 8760*time_interval.
"""
return np.array(self['dc_net'], dtype=np.float32)
@staticmethod
def default():
"""Get the executed default pysam Pvsamv1 object.
Returns
-------
PySAM.Pvsamv1
"""
return DefaultPvSamv1.default()
class TcsMoltenSalt(AbstractSamSolar):
"""Concentrated Solar Power (CSP) generation with tower molten salt
"""
MODULE = 'tcsmolten_salt'
PYSAM = PySamCSP
def cf_profile(self):
"""Get absolute value hourly capacity factor (frac) profile in
orig timezone.
Returns
-------
cf_profile : np.ndarray
1D numpy array of capacity factor profile.
Datatype is float32 and array length is 8760*time_interval.
"""
x = np.abs(self.gen_profile() / self.sam_sys_inputs['system_capacity'])
return x
@staticmethod
def default():
"""Get the executed default pysam CSP object.
Returns
-------
PySAM.TcsmoltenSalt
"""
return DefaultTcsMoltenSalt.default()
class AbstractSamSolarThermal(AbstractSamSolar, ABC):
"""Base class for solar thermal """
PYSAM_WEATHER_TAG = None
def set_resource_data(self, resource, meta):
"""
Set NSRDB resource file. Overloads Solar.set_resource_data(). Solar
thermal PySAM models require a data file, not raw data.
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
meta : pd.DataFrame | pd.Series
Meta data corresponding to the resource input for the single
location. Should include values for latitude, longitude, elevation,
and timezone.
"""
self.time_interval = self.get_time_interval(resource.index.values)
pysam_w_fname = self._create_pysam_wfile(resource, meta)
# pylint: disable=E1101
self[self.PYSAM_WEATHER_TAG] = pysam_w_fname
def _create_pysam_wfile(self, resource, meta):
"""
Create PySAM weather input file. PySAM will not accept data on Feb
29th. For leap years, December 31st is dropped and time steps are
shifted to relabel Feb 29th as March 1st, March 1st as March 2nd, etc.
Parameters
----------
resource : pd.DataFrame
Timeseries solar or wind resource data for a single location with a
pandas DatetimeIndex. There must be columns for all the required
variables to run the respective SAM simulation. Remapping will be
done to convert typical NSRDB/WTK names into SAM names (e.g. DNI ->
dn and wind_speed -> windspeed)
meta : pd.DataFrame | pd.Series
Meta data corresponding to the resource input for the single
location. Should include values for latitude, longitude, elevation,
and timezone.
Returns
-------
fname : string
Name of weather csv file
"""
fname = '{}_weather.csv'.format(self._site)
logger.debug('Creating PySAM weather data file: {}'.format(fname))
# ------- Process metadata
m = pd.DataFrame(meta).T
timezone = m['timezone']
m['Source'] = 'NSRDB'
m['Location ID'] = meta.name
m['City'] = '-'
m['State'] = m['state'].apply(lambda x: '-' if x == 'None' else x)
m['Country'] = m['country'].apply(lambda x: '-' if x == 'None' else x)
m['Latitude'] = m['latitude']
m['Longitude'] = m['longitude']
m['Time Zone'] = timezone
m['Elevation'] = m['elevation']
m['Local Time Zone'] = timezone
m['Dew Point Units'] = 'c'
m['DHI Units'] = 'w/m2'
m['DNI Units'] = 'w/m2'
m['Temperature Units'] = 'c'
m['Pressure Units'] = 'mbar'
m['Wind Speed'] = 'm/s'
m = m.drop(['elevation', 'timezone', 'country', 'state', 'county',
'urban', 'population', 'landcover', 'latitude',
'longitude'], axis=1)
m.to_csv(fname, index=False, mode='w')
# --------- Process data
var_map = {'dni': 'DNI',
'dhi': 'DHI',
'wind_speed': 'Wind Speed',
'air_temperature': 'Temperature',
'dew_point': 'Dew Point',
'surface_pressure': 'Pressure',
}
resource = resource.rename(mapper=var_map, axis='columns')
time_index = resource.index
# Adjust from UTC to local time
local = np.roll(resource.values, int(timezone * self.time_interval),
axis=0)
resource = pd.DataFrame(local, columns=resource.columns,
index=time_index)
mask = (time_index.month == 2) & (time_index.day == 29)
time_index = time_index[~mask]
df = | pd.DataFrame(index=time_index) | pandas.DataFrame |
# Developed by <NAME>, <EMAIL>
# Version 3.05
# Last Updated April 13, 2020
# Purpose: To compile publicly-available CAISO system-wide electricity demand, supply, and emissions data into a csv file
# Currently configured to coninue downloading data until the most recent data has been downloaded.
# All directories and files will be created the first time you run the script
# Run in unbuffered mode to make sure time.sleep() works: $ python -u
#%%
from bs4 import BeautifulSoup
import csv
from datetime import datetime, timedelta
from functools import reduce
import math
import numpy as np
import openpyxl
import os
import pandas as pd
from pathlib import Path
import pytz
import requests
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
import shelve
import sys
import time
start_i = time.time() #start initialization timer
#environment variables
demandURL = "http://www.caiso.com/TodaysOutlook/Pages/default.aspx"
supplyURL = "http://www.caiso.com/TodaysOutlook/Pages/supply.aspx"
emissionsURL = "http://www.caiso.com/TodaysOutlook/Pages/emissions.aspx"
curtailURL = "http://www.caiso.com/informed/Pages/ManagingOversupply.aspx#dailyCurtailment"
downloads = Path.cwd() / 'downloads'
curtailments = Path.cwd() / 'curtailments'
dataFile = Path.cwd() / "outputs/CAISOdata.csv"
dataFile_dtypes = {'month': 'uint8', 'day': 'uint8', 'weekday': 'uint8', 'hour': 'uint8', 'interval': 'uint8', \
'demand_DayAF': 'uint16', 'demand_HourAF': 'uint16', 'demand_actual': 'uint16', 'demand_net': 'uint16', \
'wind_curtail_MW': 'float32', 'solar_curtail_MW': 'float32', 'solar_MW': 'uint16', 'wind_MW': 'uint16', 'geothermal_MW': 'uint16', \
'biomass_MW': 'uint16', 'biogas_MW': 'uint16', 'sm_hydro_MW': 'uint16', 'battery_MW': 'int8', 'renewable_MW': 'uint16', 'natgas_MW': \
'uint16', 'lg_hydro_MW': 'uint16', 'imports_MW': 'int16', 'nuclear_MW': 'uint16', 'coal_MW': 'uint8', 'other_MW': 'uint8', 'imports_co2': \
'int16', 'natgas_co2': 'uint16', 'biogas_co2': 'uint16', 'biomass_co2': 'uint8', 'geothermal_co2': 'uint8', 'coal_co2': 'uint8'}
ct_dtypes = {'Hour': 'uint8', 'Interval': 'uint8', 'Wind Curtailment': 'float32', 'Solar Curtailment': 'float32'}
shelf = Path.cwd() / 'shelf.db'
def main():
#----- start initialization -----#
print(' Initializing...')
directories = ['outputs','downloads','curtailments']
for d in directories: #if the directories don't exist, create them
directory = Path.cwd() / d
if not directory.exists():
os.makedirs(d)
print(' '+str(d)+' directory created.')
if not Path(shelf.stem+'.db.dat').exists():
with shelve.open(str(shelf)) as s:
s['caiso'] = {
'latestDate': '',
'postDate': '',
'ct_latestDate': '',
}
user_initialized = 0 #track whether the start date is inputted by the user (1) or read from an existing output file (0)
if not dataFile.exists():
with open(dataFile, 'w+', newline=''):
pass
print(' New CSV output file created.\n Please check the date dropdown menu for one of the charts at http://www.caiso.com/TodaysOutlook/Pages/default.aspx \n and enter an available date to start data collection (formatted as "MM/DD/YYYY"):')
latestDate = input(' >')
user_initialized += 1
while True:
try:
latestDate = datetime.strptime(latestDate, '%m/%d/%Y')
break
except:
print(' Date format not recognized.\n Please enter a date formatted as "MM/DD/YYYY":')
latestDate = input(' >')
latestDate = datetime.strftime(latestDate - timedelta(days=1), '%m/%d/%Y')
with shelve.open(str(shelf), writeback=True) as s:
s['caiso']['latestDate'] = latestDate
#----- end initialization -----#
latest = checkLatest()
latestDate_dt = latest[0]
dataDate = latest[1]
browser = webdriverConfig() #configure the webdriver that will be used for data collection
yesterday = datetime.now() - timedelta(days=1) #create a datetime object for yesterday's date
count = 1
end_i = time.time() #end initialization timer
print('Initialization time = '+str(end_i-start_i)+' seconds') #timer
curtail_df = downloadCurtailment(browser, user_initialized) #only needs to run once for each time the code runs
while latestDate_dt.date() < yesterday.date(): #continue downloading and appending data until the most recent data has been added
start = time.time()
tmpDelete('downloads')
downloadDemand(browser, dataDate)
downloadSupply(browser, dataDate)
downloadEmissions(browser, dataDate)
dataQuality()
copyData(latestDate_dt, curtail_df)
latest = checkLatest()
latestDate_dt = latest[0]
dataDate = latest[1]
print(' Data for '+str(datetime.strftime(latestDate_dt, '%m/%d/%Y'))+' appended to data file.')
end = time.time()
print('Loop # '+str(count)+' time = '+str(end-start)+' seconds') #loop timer
count += 1
browser.close()
print('Data file up to date with most recent data')
def checkLatest(): #check dataFile for date of most recent data
with shelve.open(str(shelf)) as s:
latestDate = s['caiso']['latestDate']
latestDate_dt = datetime.strptime(latestDate, '%m/%d/%Y') #parse the date as a date object
unixts = latestDate_dt.timestamp() #convert date to epoch/unix time
pst = pytz.timezone('America/Los_Angeles') #need to account for daylight savings
offset = int(pst.localize(datetime.fromtimestamp(unixts)).strftime('%z')[2]) #return the number of hours behind UTC
dataDate = math.trunc((unixts - (3600 * offset)) * 1000 + 86400000) #this is the data attribute that the website uses to identify dates in the datepicker dropdown #subtracting 28,000 sec converts to PST, convert to millisec, add one day
return latestDate_dt, dataDate
def webdriverConfig(): #configure the webdriver
options = webdriver.ChromeOptions()
#options.add_argument('--headless') #disabled: downloading files does not work in headless mode
options.add_argument('log-level=1') #ignore any info warnings
prefs = {"download.default_directory" : str(downloads)}
options.add_experimental_option("prefs",prefs)
browser = webdriver.Chrome(options=options)
return browser
def download_wait(f): #wait for files to finish downloading before continuing
seconds = 0
dl_wait = True
while dl_wait and seconds < 20:
time.sleep(1) #check every sec
dl_wait = False
for fname in os.listdir(Path.cwd() / f):
if fname.endswith('.crdownload'): #incomplete chrome downloads end in .crdownload
dl_wait = True
seconds += 1
time.sleep(1) #allow 1 sec after downloading
def downloadCurtailment(browser, user_initialized): #download curtailment data (updated monthly)
print(' Checking for new curtailment data...')
browser.get(curtailURL) #open webdriver
time.sleep(1) #wait for page to load
soup = BeautifulSoup(browser.page_source, 'lxml') #use beautifulsoup to parse html
postDate = soup.find_all('span', class_='postDate')[0].get_text() #get current postDate from site
with shelve.open(str(shelf)) as s:
prevPostDate = s['caiso']['postDate']
if postDate==prevPostDate: #compare current and previous postdate
print(' Latest curtailment data already downloaded.') #do nothing if they match; we already have the most current file
curtail_read = | pd.read_csv(curtailments / 'curtailment_data.csv', dtype=ct_dtypes) | pandas.read_csv |
#!/usr/bin/env python
import numpy as np
import shutil
import urllib
import urlparse
import os
from core import *
import util
from pprint import pprint
import pandas as pd
class PaperDownload(XmlClass):
def __init__(self, xe=None):
XmlClass.__init__(self,xe=xe)
self.dest = xe.attrib['dest']
self.s_file_obo = os.path.join(SyncDB.DOWNLOAD_DIR(),'hp.obo')
self.s_file_gene2hpo = os.path.join(SyncDB.DOWNLOAD_DIR(),'genes_to_phenotype.txt')
self.fn_hpo_ann = os.path.join(SyncDB.DOWNLOAD_DIR(),'hpo_ann.csv')
self.fn_trrust_rawdata_human = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_rawdata.human.tsv')
self.fn_trrust_rawdata_mouse = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_rawdata.mouse.tsv')
self.fn_DisGeNET_source = os.path.join(SyncDB.DOWNLOAD_DIR(), 'curated_gene_disease_associations.tsv')
self.fn_DisGeNET_ann = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_ann.csv')
self.fn_trrust_human_term = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_human.csv')
self.fn_trrust_mouse_term = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_mouse.csv')
self.fn_symbol = os.path.join(SyncDB.UPLOAD_DIR(),'gid2source_id','symbol.csv')
self.fn_synonym = os.path.join(SyncDB.UPLOAD_DIR(),'gid2source_id','gene_synonym.csv')
self.fn_description = os.path.join(SyncDB.UPLOAD_DIR(),'annotation','gene_description.csv')
self.inputs=['ds:paper',self.fn_symbol,self.fn_synonym, self.fn_description]
self.fn_trrust_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'trrust_term.csv')
self.fn_trrust_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'trrust_term_pair.csv')
self.fn_disgenet_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_term.csv')
self.fn_disgenet_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_term_pair.csv')
self.fn_PaGenBase_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'PaGenBase_term.csv')
self.fn_PaGenBase_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'PaGenBase_term_pair.csv')
def get_fn_dest(self):
return os.path.join(SyncDB.DOWNLOAD_DIR(),self.dest)
def populate_more(self,root):
XmlClass.populate_more(self,root)
self.outputs.extend([self.fn_hpo_ann,
self.fn_trrust_term,
self.fn_trrust_term_pair,
self.fn_disgenet_term,
self.fn_disgenet_term_pair,
self.fn_DisGeNET_ann,
self.fn_PaGenBase_term,
self.fn_PaGenBase_term_pair,
])
def do_update(self):
self.get_parse_PaGenBase()
self.parse_disgenet()
t_term_human, t_term_pair_human = self.parse_trrust(self.fn_trrust_rawdata_human, 9606, start_id=1)
t_term_mouse, t_term_pair_mouse = self.parse_trrust(self.fn_trrust_rawdata_mouse, 10090,start_id=len(t_term_human)+1)
t_term = pd.concat([t_term_human,t_term_mouse])
t_term_pair = pd.concat([t_term_pair_human,t_term_pair_mouse])
t_term.to_csv(self.fn_trrust_term, index=False)
t_term_pair.to_csv(self.fn_trrust_term_pair, index=False)
parent_child = self.parse_hp(self.s_file_obo)
# print(parent_child['HP:0000001'])
pheno_level = self.get_level(parent_child)
# print(pheno_level['HP:0012823'], pheno_level['HP:0000001'])
self.parse_gp(self.s_file_gene2hpo, pheno_level)
def get_parse_PaGenBase(self):
count_start = 0
S_term = []
S_pair = []
S_file = [
('hotisp.txt', 'Tissue-specific', 9606),
('hocesp.txt', 'Cell-specific', 9606),
('mutisp.txt', 'Tissue-specific', 10090),
('mucesp.txt', 'Cell-specific', 10090),
('ratisp.txt', 'Tissue-specific', 10116),
('drtisp.txt', 'Tissue-specific', 7227)
]
for fn in S_file:
fn = fn[0]
urllib.urlretrieve('http://bioinf.xmu.edu.cn/PaGenBase/browse/{0}'.format(fn),
os.path.join(SyncDB.DOWNLOAD_DIR(), fn))
for (s_file, s_ann, tax_id) in S_file:
s_file = os.path.join(SyncDB.DOWNLOAD_DIR(), s_file)
t_term, t_pair, count_start = self.parse_PaGenBase(s_file, s_ann, tax_id, count_start)
S_term.append(t_term)
S_pair.append(t_pair)
t_term = pd.concat(S_term, ignore_index=True)
t_pair = pd.concat(S_pair, ignore_index=True)
t_term.to_csv(self.fn_PaGenBase_term, index=False)
t_pair.to_csv(self.fn_PaGenBase_term_pair, index=False)
pass
def parse_PaGenBase(self, s_file, s_ann, tax_id, count_start=0):
t = pd.read_table(s_file, skiprows=7)
t.rename2({'Gene Symbol': 'Symbol'})
t = t[['Symbol', 'Sample']].copy()
S_term = util.unique(t.Sample)
data = []
c_id = {}
for x in S_term:
count_start += 1
term_id = 'PGB:%05d' % count_start
term_name = s_ann + ': ' + x
data.append({'term_id': term_id, 'term_name': term_name, 'description': term_name})
c_id[x] = term_id
t_term = pd.DataFrame(data)
t_pair = t[['Symbol', 'Sample']].copy()
t_pair.rename2({'Sample': 'term_name'})
t_pair['term_id'] = t_pair.term_name.apply(lambda x: c_id[x])
t_pair['term_name'] = t_pair.term_name.apply(lambda x: s_ann + ': ' + x)
t_pair['tax_id'] = tax_id
t_pair['type_name'] = 'PaGenBase'
t_pair.drop_duplicates(['term_id', 'Symbol'], inplace=True)
#convert symbol to gid
dt = pd.read_csv(self.fn_symbol)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id = dict(zip(dt.source_id, dt.gid.astype(str)))
dt = pd.read_csv(self.fn_synonym)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id.update(dict(zip(dt.source_id, dt.gid.astype(str))))
t_pair['gid'] = t['Symbol'].apply(lambda x: symbol2gene_id.get(x, ''))
t_pair = t_pair[t_pair.gid != ''].copy()
return (t_term, t_pair, count_start)
def parse_trrust(self, s_file, tax_id, start_id):
dt = pd.read_csv(self.fn_symbol)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id = dict(zip(dt.source_id, dt.gid.astype(str)))
dt = pd.read_csv(self.fn_synonym)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id.update(dict(zip(dt.source_id, dt.gid.astype(str))))
dt = | pd.read_csv(self.fn_description) | pandas.read_csv |
#%%
import pandas as pd
import numpy as np
import requests
from datetime import datetime as dt
from io import StringIO
import os
import us
import git
#%%
# List urls
urls = {
"case": "https://static.usafacts.org/public/data/covid-19/covid_confirmed_usafacts.csv",
"death": "https://static.usafacts.org/public/data/covid-19/covid_deaths_usafacts.csv"
}
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
datadir = f"{homedir}/data/usa/covid/"
resourcedir = f"{homedir}/code/annotation/"
# %%
# Read states general information
df_states = pd.read_csv(f"{resourcedir}usa_states_code.csv")
# Initialize dictionary to save dataframes
df_dict = {}
# Loop through urls
for key, url in urls.items():
# Read confirmed cases data
request = requests.get(urls[key])
# Convert into string
txt = StringIO(request.text)
# Convert into dataframe
df = pd.read_csv(txt)
# Melt into long format
df = df.melt(
value_vars=df.columns[4:],
id_vars=df.columns[0:4],
var_name="date",
value_name=key
)
# rename columns
df = df.rename(columns={
"countyFIPS": "county_fips",
"stateFIPS": "state_fips",
"County Name": "county",
"State": "state_iso_a2",
})
# Convert date to datetime
df["date"] = | pd.to_datetime(df["date"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
:Date: 2017. 11. 16.
"""
import traceback
import numpy as np
import pandas as pd
from sqlalchemy.exc import SQLAlchemyError
from utils.db import get_connection
def get_stock_master():
"""
Get stock masters from MySQL and return them.
:return stock_masters: (DataFrame)
index code | (string) 6 digit number string code of stock.
columns name | (string) The name of company.
"""
schema_name = 'highvol_stock_master'
select_sql = "SELECT * FROM {}".format(schema_name)
stock_masters = pd.DataFrame()
connection = get_connection()
try:
# get all stock codes from the db.
stock_masters = | pd.read_sql(select_sql, connection) | pandas.read_sql |
# coding=utf-8
"""
Entry point for producing Fractional Cover products.
Specifically intended for running in the PBS job queue system at the NCI.
The three entry points are:
1. datacube-fc submit
2. datacube-fc generate
3. datacube-fc run
"""
import errno
import logging
import os
import sys
from copy import deepcopy
from datetime import datetime
from functools import partial
from math import ceil
from pathlib import Path
from time import time as time_now
from typing import Tuple
import click
import xarray
from pandas import to_datetime
from datacube.api.grid_workflow import GridWorkflow, Tile
from datacube.api.query import Query
from datacube.index._api import Index
from datacube.model import DatasetType, GeoPolygon
from datacube.model.utils import make_dataset, xr_apply, datasets_to_doc
from datacube.storage.storage import write_dataset_to_netcdf
from datacube.ui import click as ui
from datacube.ui import task_app
from datacube.utils import unsqueeze_dataset
from digitalearthau import paths, serialise
from digitalearthau.qsub import QSubLauncher, with_qsub_runner, TaskRunner
from digitalearthau.runners.model import TaskDescription
from digitalearthau.runners.util import submit_subjob, init_task_app
from fc import __version__
from fc.fractional_cover import fractional_cover
APP_NAME = 'datacube-fc'
_LOG = logging.getLogger(__file__)
CONFIG_DIR = Path(__file__).parent / 'config'
def make_fc_config(index: Index, config: dict, dry_run=False, **kwargs):
if not os.access(config['location'], os.W_OK):
_LOG.warning('Current user appears not have write access output location: %s', config['location'])
source_product, output_product = _ensure_products(config, index, dry_run=dry_run)
# The input config has `source_product` and `output_product` fields which are names. Perhaps these should
# just replace them?
config['nbar_product'] = source_product
config['fc_product'] = output_product
config['variable_params'] = _build_variable_params(config)
if 'task_timestamp' not in config:
config['task_timestamp'] = int(time_now())
return config
_MEASUREMENT_KEYS_TO_COPY = ('zlib', 'complevel', 'shuffle', 'fletcher32', 'contiguous', 'attrs')
def _build_variable_params(config: dict) -> dict:
chunking = config['storage']['chunking']
chunking = [chunking[dim] for dim in config['storage']['dimension_order']]
variable_params = {}
for mapping in config['measurements']:
measurment_name = mapping['name']
variable_params[measurment_name] = {
k: v
for k, v in mapping.items()
if k in _MEASUREMENT_KEYS_TO_COPY
}
variable_params[measurment_name]['chunksizes'] = chunking
return variable_params
def _ensure_products(app_config: dict, index: Index, dry_run=False) -> Tuple[DatasetType, DatasetType]:
source_product_name = app_config['source_product']
source_product = index.products.get_by_name(source_product_name)
if not source_product:
raise ValueError(f"Source Product {source_product_name} does not exist")
output_product = DatasetType(
source_product.metadata_type,
_create_output_definition(app_config, source_product)
)
if not dry_run:
_LOG.info('Built product %s. Adding to index.', output_product.name)
output_product = index.products.add(output_product)
return source_product, output_product
def _create_output_definition(config: dict, source_product: DatasetType) -> dict:
output_product_definition = deepcopy(source_product.definition)
output_product_definition['name'] = config['output_product']
output_product_definition['managed'] = True
output_product_definition['description'] = config['description']
output_product_definition['metadata']['format'] = {'name': 'NetCDF'}
output_product_definition['metadata']['product_type'] = config.get('product_type', 'fractional_cover')
output_product_definition['storage'] = {
k: v for (k, v) in config['storage'].items()
if k in ('crs', 'tile_size', 'resolution', 'origin')
}
var_def_keys = {'name', 'dtype', 'nodata', 'units', 'aliases', 'spectral_definition', 'flags_definition'}
output_product_definition['measurements'] = [
{k: v for k, v in measurement.items() if k in var_def_keys}
for measurement in config['measurements']
]
return output_product_definition
def get_filename(config, tile_index, sources):
file_path_template = str(Path(config['location'], config['file_path_template']))
return file_path_template.format(tile_index=tile_index,
start_time=to_datetime(sources.time.values[0]).strftime('%Y%m%d%H%M%S%f'),
end_time= | to_datetime(sources.time.values[-1]) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = | pd.DataFrame(exp_dict, index=new_index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = | pd.Series(cumulative, index=s.index) | pandas.Series |
import datetime
import os
import time
import copy
from enum import Enum
import requests
import yaml
import pandas as pd
class ExceptionFogifySDK(Exception):
pass
class FogifySDK(object):
def __init__(self, url: str, docker_compose: str = None):
self.url = url
self.docker_compose = None
self.nodes = []
self.networks = []
self.topology = []
self.services = []
self.docker_swarm_rep = None
if docker_compose:
try:
file = open(docker_compose, "r")
self.docker_compose = file.read()
file.close()
self.parse_docker_swarm()
except FileNotFoundError:
raise ExceptionFogifySDK("No such file or directory: " + docker_compose)
class Action_type(Enum):
HORIZONTAL_SCALING = 'HORIZONTAL_SCALING'
VERTICAL_SCALING = 'VERTICAL_SCALING'
NETWORK = 'NETWORK'
STRESS = 'STRESS'
COMMAND = 'COMMAND'
UPDATE_LINKS = 'UPDATE_LINKS'
def get_url(self, path: str = ""):
if not self.url.startswith("http://"):
return "https://%s" % (self.url + path)
return self.url + path
def check_docker_swarm_existence(self):
if self.docker_compose is None:
raise ExceptionFogifySDK('You can not apply this functionality with fogify yaml')
def parse_docker_swarm(self):
self.check_docker_swarm_existence()
self.docker_swarm_rep = yaml.safe_load(self.docker_compose)
if 'services' not in self.docker_swarm_rep:
raise ExceptionFogifySDK("The docker-compose should have at least services")
if 'x-fogify' in self.docker_swarm_rep:
if self.docker_swarm_rep['x-fogify']:
self.networks = self.docker_swarm_rep['x-fogify']['networks'] if 'networks' in self.docker_swarm_rep[
'x-fogify'] else []
self.nodes = self.docker_swarm_rep['x-fogify']['nodes'] if 'nodes' in self.docker_swarm_rep[
'x-fogify'] else []
self.scenarios = self.docker_swarm_rep['x-fogify']['scenarios'] if 'scenarios' in self.docker_swarm_rep[
'x-fogify'] else []
self.topology = self.docker_swarm_rep['x-fogify']['topology'] if 'topology' in self.docker_swarm_rep[
'x-fogify'] else []
self.services = [i for i in self.docker_swarm_rep["services"]]
def upload_file(self, remove_file: bool = True):
if self.docker_compose:
self.docker_swarm_rep["x-fogify"] = {
"networks": self.networks if hasattr(self, 'networks') else [],
"topology": self.topology if hasattr(self, 'topology') else [],
"nodes": self.nodes if hasattr(self, 'nodes') else [],
"scenarios": self.scenarios if hasattr(self, 'scenarios') else []
}
f = open("fogified-docker-compose.yaml", "w")
f.write(yaml.dump(self.docker_swarm_rep))
f.close()
self.fogify_yaml = open("fogified-docker-compose.yaml", "rb")
if remove_file:
os.remove("fogified-docker-compose.yaml")
return self.fogify_yaml
def __del__(self):
if hasattr(self, 'fogify_yaml') and self.fogify_yaml:
self.fogify_yaml.close()
del self
def deploy(self, timeout: int = 120):
url = self.get_url("/topology/")
self.clean_metrics()
self.clean_annotations()
response = requests.post(url, files={"file": self.upload_file()}, headers={}).json()
if not ('message' in response and response['message'].upper() == "OK"):
raise ExceptionFogifySDK("The deployment is failed (%s)"%str(response))
service_count = {name: response['swarm']['services'][name]['deploy']['replicas'] for name in
response['swarm']['services']}
from tqdm import tqdm
total = sum([int(service_count[i]) for i in service_count])
pbar = tqdm(total=total, desc="Deploy process")
count = 0
current_iteration = 0
while (count < total and current_iteration < timeout):
time.sleep(5)
response = requests.get(url, headers={})
if response.status_code != 200:
raise ExceptionFogifySDK("The deployment is failed (%s)" % str(response.json()))
response = response.json()
new_count = 0
for i in response:
new_count += len(response[i])
dif = new_count - count
pbar.update(dif)
count = new_count
current_iteration += 5
pbar.close()
if current_iteration > timeout:
self.undeploy()
raise ExceptionFogifySDK("The deployment is failed")
return {
"message": "The services are deployed ( %s )" % str(service_count)
}
def undeploy(self, timeout: int = 120):
url = self.get_url("/topology/")
response = requests.delete(url)
if response.status_code != 200:
raise ExceptionFogifySDK("Server error ( %s )" % str(response.json()))
response = requests.get(url, headers={}).json()
total = 0
for i in response:
total += len(response[i])
from tqdm import tqdm
pbar = tqdm(total=total, desc="Undeploy process")
count = total
current_iteration = 0
while (count > 0 and current_iteration < timeout):
time.sleep(5)
response = requests.get(url, headers={}).json()
new_count = 0
for i in response:
new_count += len(response[i])
dif = count - new_count
pbar.update(dif)
count = new_count
current_iteration += 5
self.data = {}
pbar.close()
if current_iteration > timeout:
raise ExceptionFogifySDK("The undeployment is failed")
return {
"message": "The %s services are undeployed" % str(total)
}
def get_metrics(self, service: str = None, from_timestamp: str = None, to_timestamp: str = None):
query = ""
query += "from_timestamp=" + str(
int(datetime.datetime.timestamp(from_timestamp))) + "&" if from_timestamp else ""
query += "to_timestamp=" + str(int(datetime.datetime.timestamp(to_timestamp))) + "&" if to_timestamp else ""
query += "service=" + service if service else ""
if hasattr(self, 'data') and service in self.data:
resp = requests.get(self.get_url("/monitorings/") + "?" + query).json()
if service in resp:
resp[service].sort(key=lambda k: k['count'])
intervals = [i['count'] for i in self.data[service]]
for i in resp[service]:
if i['count'] not in intervals:
self.data[service].append(i)
else:
self.data = requests.get(self.get_url("/monitorings/") + "?" + query).json()
for i in self.data:
self.data[i].sort(key=lambda k: k['count'])
return self
def get_network_packets_from(self, service: str, from_timestamp: str = None, to_timestamp: str = None,
packet_type: str = None):
query = ""
query += "from_timestamp=" + str(
int(datetime.datetime.timestamp(from_timestamp))) + "&" if from_timestamp else ""
query += "to_timestamp=" + str(int(datetime.datetime.timestamp(to_timestamp))) + "&" if to_timestamp else ""
query += "packet_type=" + str(packet_type) + "&" if packet_type else ""
query += "service=" + service
data = requests.get(self.get_url("/packets/") + "?" + query).json()
if "res" not in data:
raise ExceptionFogifySDK("The API call for packets does not response readable object")
res = pd.DataFrame.from_records(data["res"])
return res
def get_metrics_from(self, service: str):
if hasattr(self, 'data') and service in self.data:
self.get_metrics(service=service,
from_timestamp=datetime.datetime.strptime(self.data[service][-1]['timestamp'],
"%a, %d %b %Y %H:%M:%S %Z") - datetime.timedelta(
milliseconds=100))
else:
self.get_metrics()
res = | pd.DataFrame.from_records(self.data[service]) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 13:20:11 2019
@author: strq
"""
import pytest
import viessmann_data_audit as vda
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_blobs
from datetime import datetime
strEntityFile = 'tests/resources/entity.gz.pkl'
strEntityFastFile = 'tests/resources/entity_fast.gz.pkl'
def create_test_data():
# Unique Numbers
arrNumbers = np.arange(0,5)
# Numbers with Nan
arrNanNumbers = np.array([1, np.nan, 2, np.nan, 3])
# Non unique objects
arrObj = ["A", "A", "B", "C", "D"]
# Categorical
# Categorical with Nan
serCat = pd.Series(["a", "b", "c", "b", "a"])
serNanCatl = pd.Series(["b", "a", "c", "e", "f"])
cat_type = pd.api.types.CategoricalDtype(
categories=["a", "b", "c", "d"], ordered=True)
serCategorical = serCat.astype(cat_type)
serNanCategorical = serNanCatl.astype(cat_type)
serNoCat = pd.Series(["a", "b", "c", "b", "a"])
cat_no_order_type = pd.api.types.CategoricalDtype(
categories=["a", "b", "c", "d"], ordered=False)
serNoOrderCategorical = serNoCat.astype(cat_no_order_type)
# Outlier
arrOutlier = np.array([1,1,1,1,10])
dictionary = {"id": arrNumbers,
"nanNumbers": arrNanNumbers,
"nonUniqueObjects": arrObj,
"categorical": serCategorical,
"nanCategorical": serNanCategorical,
"noOrderCategorical": serNoOrderCategorical,
"sigOutlier": arrOutlier}
dfData = pd.DataFrame(dictionary)
dfData.insert(0,'TimeStamp',pd.datetime.now().replace(microsecond=0))
dfData.loc[0,"TimeStamp"] = pd.Timestamp('2018-12-01 08:00:00.000000', tz=None)
dfData.loc[1,"TimeStamp"] = pd.Timestamp('2018-12-01 08:00:01.000000', tz=None)
dfData.loc[2,"TimeStamp"] = pd.Timestamp('2019-01-31 08:00:00.000000', tz=None)
dfData.loc[3,"TimeStamp"] = pd.Timestamp('2019-01-31 08:01:00.000000', tz=None)
dfData.loc[4,"TimeStamp"] = pd.Timestamp('2021-01-31 09:00:00.000000', tz=None)
return dfData
def create_dctValuesAllValid():
dctValuesAllValid = dict()
dctValuesAllValid.update({"id":[0,4]})
dctValuesAllValid.update({"nanNumbers":[1,3]})
dctValuesAllValid.update({"nonUniqueObjects":["A", "B", "C", "D"]})
dctValuesAllValid.update({"categorical":["a", "d"]})
dctValuesAllValid.update({"nanCategorical":["a", "d"]})
dctValuesAllValid.update({"noOrderCategorical":["a", "b", "c", "d"]})
dctValuesAllValid.update({"sigOutlier":[1,10]})
return dctValuesAllValid
def create_dctValuesNoneValid():
dctValuesNoneValid = dict()
dctValuesNoneValid.update({"id":[5,6]})
dctValuesNoneValid.update({"nanNumbers":[4,5]})
dctValuesNoneValid.update({"nonUniqueObjects":[]})
dctValuesNoneValid.update({"categorical":[]})
dctValuesNoneValid.update({"nanCategorical":[]})
dctValuesNoneValid.update({"noOrderCategorical":[]})
dctValuesNoneValid.update({"sigOutlier":[2,4]})
return dctValuesNoneValid
def create_dctOutliers():
dctOutliers = dict()
dctOutliers.update({"id":[0,4]})
dctOutliers.update({"nanNumbers":[1,3]})
dctOutliers.update({"sigOutlier":[1,1]})
return dctOutliers
def create_dctWrongOutliersDct():
dctOutliers = dict()
dctOutliers.update({"id":["A"]})
dctOutliers.update({"nanNumbers":[1,3]})
dctOutliers.update({"sigOutlier":[1,1]})
return dctOutliers
def create_test_data_no_time():
# Unique Numbers
arrNumbers = np.arange(0,5)
# Numbers with Nan
arrNanNumbers = np.array([1, np.nan, 2, np.nan, 3])
# Non unique objects
arrObj = ["A", "A", "B", "C", "D"]
# Categorical
# Categorical with Nan
serCat = pd.Series(["a", "b", "c", "b", "a"])
serNanCatl = pd.Series(["b", "a", "c", "e", "f"])
cat_type = pd.api.types.CategoricalDtype(
categories=["a", "b", "c", "d"], ordered=True)
serCategorical = serCat.astype(cat_type)
serNanCategorical = serNanCatl.astype(cat_type)
# Outlier
arrOutlier = np.array([1,1,1,1,10])
dictionary = {"id": arrNumbers,
"nanNumbers": arrNanNumbers,
"nonUniqueObjects": arrObj,
"categorical": serCategorical,
"nanCategorical": serNanCategorical,
"sigOutlier": arrOutlier}
dfData = pd.DataFrame(dictionary)
return dfData
class TestDataAudits(object):
"""
def test_wrong_proportion_of_missing_values(self):
with pytest.raises(ValueError):
dfData = create_test_data()
dfResult = proportion_of_missing_values(dfData,
strDetectionMode = "blub",
boolDropOutliers = False)
"""
def test_data_type_mapper_numeric(self):
assert vda.data_type_mapper("float64") == "Numeric"
def test_data_type_mapper_undefined(self):
assert vda.data_type_mapper("timedelta[ns]") == "Undefined"
def test_data_type_mapper_datetime(self):
assert vda.data_type_mapper("datetime64[ns]") == "Datetime"
def test_data_type_mapper_name(self):
assert vda.data_type_mapper("category") == "Name"
def test_data_description(self):
dfData = create_test_data()
dfResults = vda.data_description(dfData)
lstCompare = ['Datetime', 'Numeric', 'Numeric', 'Name', 'Name',
'Name', 'Name', 'Numeric']
assert list(dfResults["Data_Type"]) == lstCompare
def test_classify_data_type_logic(self):
dfData = create_test_data()
assert vda.classify_data_type_logic(dfData["id"]) == "num"
def test_proportion_of_missing_values(self):
dfData = create_test_data()
serResults = vda.proportion_of_missing_values(dfData)
assert serResults[5] == 0.4
def test_proportion_of_invalid_values_All_Valid(self):
dfData = create_test_data()
dctValues = create_dctValuesAllValid()
dfResults = vda.proportion_of_invalid_values(dfData, dctValues)
assert(dfResults.loc[1, "Proportion_Invalid_Values"] == 0.4)
def test_proportion_of_invalid_values_None_Valid(self):
dfData = create_test_data()
dctValues = create_dctValuesNoneValid()
dfResults = vda.proportion_of_invalid_values(dfData, dctValues)
assert(dfResults.loc[1, "Proportion_Invalid_Values"] == 1)
def test_proportion_of_outliers(self):
dfData = create_test_data()
dctOutliers = create_dctOutliers()
dfResults = vda.proportion_of_outliers(dfData, dctOutliers)
assert(dfResults.loc[2, "Proportion_Outliers"] == 0.2)
def test_wrong_types_list_proportion_of_outliers(self):
with pytest.raises(AssertionError):
dfData = create_test_data()
dctOutliers = create_dctWrongOutliersDct()
vda.proportion_of_outliers(dfData, dctOutliers)
def test_value_range_of_features(self):
dfData = create_test_data()
serResults = vda.value_range_of_features(dfData)
assert(serResults['id'] == [0,4])
def test_number_of_unique_values(self):
dfData = create_test_data()
serResults = vda.number_of_unique_values(dfData)
assert(serResults['id'] == 5)
def test_granularity_of_timestamp_feature(self):
dfData = create_test_data()
dfResults = vda.granularity_of_timestamp_feature(dfData, ["D"])
assert(dfResults.loc[0, "Maximum"] == 731.0409722222222)
def test_granularity_of_timestamp_feature_wrong_timeConversion(self):
with pytest.raises(TypeError):
dfData = create_test_data()
vda.granularity_of_timestamp_feature(dfData, "E")
def test_convert_time_column_and_granularity_of_timestamp(self):
dfData = create_test_data()
lstScale = ["Y","M","W","D","h","m","s"]
dfResults, dfRecommendation = vda\
.convert_time_column_and_granularity_of_timestamp(dfData,
["TimeStamp"],
lstScale)
assert round(dfResults["Mean"].iloc[3], 2) == 198.01
def test_convert_time_column_and_granularity_of_timestamp2(self):
dfData = create_test_data()
lstScale = ["Y","M","W","D","h","m","s"]
dfResults, dfRecommendation = vda\
.convert_time_column_and_granularity_of_timestamp(dfData,
["TimeStamp"],
lstScale)
assert dfRecommendation["Recommend granularity"].iloc[0] == "Months"
def test_pca_proj_kMeans(self):
X, y_true = make_blobs(n_samples=300, centers=4,
cluster_std=0.60, random_state=0)
dfX = pd.DataFrame(X)
#dfY = pd.DataFrame(y_true)
a, b = vda.proj_kMeans(dfX, 4, False)
assert a == 0.6270346325358909
def test_ica_proj_kMeans(self):
X, y_true = make_blobs(n_samples=300, centers=4,
cluster_std=0.60, random_state=0)
dfX = pd.DataFrame(X)
#dfY = pd.DataFrame(y_true)
a, b = vda.proj_kMeans(dfX, 4, False, method='ica')
assert round(a, 3) == 0.612
def test_kMeans(self):
X, y_true = make_blobs(n_samples=300, centers=4,
cluster_std=0.60, random_state=0)
dfX = | pd.DataFrame(X) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from scipy.stats import spearmanr
from scipy.stats import pearsonr
import statsmodels
from statsmodels.distributions.empirical_distribution import ECDF
####### EDIT THIS #####
def mimosca_get_1sidepval(B,joint,edges,gsums,gvar,nguides):
Bpval=B.copy()
#create index lookup for each gene to the pairs
genevec=np.array(range(len(gsums)))
guidevec=np.array(range(len(nguides)))
gsums=np.array(gsums)
gvar=np.array(gvar)
nguides=np.array(nguides)
rowindex_dict={}
colindex_dict={}
for i in range(len(edges[0])-1):
for j in range(len(edges[1])-1):
logical_gsums=np.logical_and(gsums>=edges[0][i],gsums<edges[0][i+1])
logical_gvar=np.logical_and(gvar>=edges[1][j],gvar<edges[1][j+1])
logical_both=np.logical_and(logical_gsums,logical_gvar)
if np.sum(logical_both)>0:
rowindex_dict[(i,j)]=genevec[logical_both]
for i in range(len(edges[2])-1):
logical_nguides=np.logical_and(nguides>=edges[2][i],nguides<edges[2][i+1])
if np.sum(logical_nguides)>0:
colindex_dict[i]=guidevec[logical_nguides]
maxedges=len(edges[3])-2
for key in rowindex_dict.keys():
for guidekey in colindex_dict.keys():
curjoint=joint[key[0]][key[1]][guidekey]
curjoint /= curjoint.sum()
curjoint=pd.DataFrame(curjoint)
curjoint.index=edges[3][:-1]
curjoint=curjoint.cumsum()
curmat=Bpval.iloc[rowindex_dict[key],colindex_dict[guidekey]]
lookup_mat=curmat.copy()
bp=pd.DataFrame(np.searchsorted(curjoint.index,curmat))
bpmax=bp>maxedges
bp[bpmax]=0
for i in range(np.shape(bp)[1]):
lookup=1.0-np.round(np.array(curjoint)[bp.iloc[:,i]],10)
lookup_mat.iloc[:,i]=lookup
lookup_mat.iloc[np.where(bpmax)]=0
Bpval.iloc[rowindex_dict[key],colindex_dict[guidekey]]=lookup_mat
Bpval[B<=0]=1.0
return Bpval
def mimosca_fdr_coefs(B,B_shuf,gsums,gvar,nguides,mybins=[30,30,20,1000]):
numshufs=(1.0*len(B_shuf))/len(B)
if numshufs%1!=0:
print('you screwed up permuted is not integer multiple of nonpermuted')
return
numshufs=int(numshufs)
gsums_rep=np.array([list(gsums)]*numshufs).flatten()
gvar_rep=np.array([list(gvar)]*numshufs).flatten()
nguides=np.array(nguides)
flag=0
for i in range(np.shape(B_shuf)[1]):
datas=pd.DataFrame([gsums_rep,gvar_rep,np.array([nguides[i]]*len(gsums_rep)),np.array(B_shuf.iloc[:,i])]).T
if flag==0:
SHUFCOV=datas
flag=1
else:
SHUFCOV=pd.concat([SHUFCOV,datas])
numBins = mybins # number of bins in each dimension
SHUFPOS=SHUFCOV.copy()
SHUFPOS=SHUFPOS[SHUFPOS[3]>=0]
joint_pos, edges_pos = np.histogramdd(np.array(SHUFPOS), bins=numBins)
joint_pos /= joint_pos.sum()
SHUFNEG=SHUFCOV.copy()
SHUFNEG=SHUFNEG[SHUFNEG[3]<=0]
SHUFNEG[3]=SHUFNEG[3].abs()
joint_neg, edges_neg = np.histogramdd(np.array(SHUFNEG), bins=numBins)
joint_neg /= joint_neg.sum()
print('Created 4D Null Distributions')
B_sign = np.sign(B)
Bpos=B.copy()
Bpos[B<0]=0
Bneg=B.copy()
Bneg[B>0]=0
Bneg=Bneg.abs()
Bpval_pos=mimosca_get_1sidepval(Bpos,joint_pos,edges_pos,gsums,gvar,nguides)
print('positive pvals calculated')
Bpval_neg=mimosca_get_1sidepval(Bneg,joint_neg,edges_neg,gsums,gvar,nguides)
print('negative pvals calculated')
BFDR=Bpval_pos.copy()
BFDR[Bpval_neg<1]=Bpval_neg[Bpval_neg<1]
for col in BFDR.columns:
curcol=BFDR[col]
curcol_logical=curcol<1
BFDR.loc[curcol_logical,col]=statsmodels.sandbox.stats.multicomp.fdrcorrection0(curcol[curcol_logical])[1]
pmin=np.min(np.abs(np.array(BFDR))[np.nonzero(np.array(BFDR))])
BFDR[BFDR==0.0]=pmin
BFDR=-np.log10(BFDR)
BFDR=np.multiply(B_sign,BFDR)
print('FDR correction performed')
return BFDR
def mimosca_pointwise_p_colwisefdr(B,Bshuf):
BFDR=B.copy()
for col in B.columns:
probs=[]
sign=[]
for ind in B.index:
curecdf=ECDF(Bshuf[col].loc[ind])
curval=B[col].loc[ind]
if curval>0:
sign.append(1)
probs.append(1.0-curecdf(B[col].loc[ind]))
else:
sign.append(-1)
probs.append(curecdf(B[col].loc[ind]))
probs=np.array(probs)
#make it the smallest entry in the column
#TODO: improve this
probs[probs==0.0]=pmin=np.min(probs[np.nonzero(np.array(probs))])
sign=np.array(sign)
BFDR[col]=sign*(-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(probs)[1]))
return BFDR
def coef_FDR(adata_here,
model_name='linear_model',
num_permutations=1,
bins=[3,3,23,1000],
copy_adata=False):
if copy_adata: adata_here = adata_here.copy()
print('== Computing coefficient significance')
#we will split into 2 paths.
#- genes go to the FDR by covariate
#- the rest go to the FDR by column
B_full=adata_here.uns['PS.'+model_name+'.coef']
genes_used=list(set(adata_here.var_names).intersection(set(adata_here.uns['PS.'+model_name+'.y'].columns)))
obs_used=list(set(list(B_full.index)).difference(set(genes_used)))
perturbations=adata_here.uns['PS.'+model_name+'.X'].columns
fdrs=[]
if len(genes_used)>0:
#fancy FDRs for genes
B_genes=B_full.loc[genes_used,perturbations]
B_genes_shuf_list=[]
for p in range(num_permutations):
B_genes_shuf_list.append(adata_here.uns['PS.'+model_name+'.coef.perm'+str(p)].loc[genes_used,perturbations])
B_genes_shuf=pd.concat(B_genes_shuf_list,axis=0)
#TODO: error if there is no adata.raw
gsums=np.mean(adata_here.raw[:,genes_used].X.toarray(),axis=0)
gvar=np.var(adata_here.raw[:,genes_used].X.toarray(),axis=0)
nguides=[]
for col_idx in range(adata_here.uns['PS.'+model_name+'.X'].shape[1]):
col=list(adata_here.uns['PS.'+model_name+'.X'])[col_idx]
cells_per_guide=adata_here.uns['PS.'+model_name+'.X'][col].sum()
nguides.append(cells_per_guide)
BFDR_genes=mimosca_fdr_coefs(B_genes,B_genes_shuf,gsums,gvar,nguides,mybins=bins)
fdrs.append(BFDR_genes)
if len(obs_used)>0:
B_obs=B_full.loc[obs_used,perturbations]
B_obs_shuf_list=[]
for p in range(num_permutations):
B_obs_shuf_list.append(adata_here.uns['PS.'+model_name+'.coef.perm'+str(p)].loc[obs_used,perturbations])
B_obs_shuf= | pd.concat(B_obs_shuf_list,axis=0) | pandas.concat |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.dataframe.datasource.dataframe import from_pandas
from mars.dataframe.datasource.series import from_pandas as series_from_pandas
from mars.dataframe.merge import concat
from mars.dataframe.utils import sort_dataframe_inplace
def test_merge(setup):
df1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
df2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df3 = df1.copy()
df3.index = pd.RangeIndex(2, 6, name='index')
df4 = df1.copy()
df4.index = pd.MultiIndex.from_tuples([(i, i + 1) for i in range(4)], names=['i1', 'i2'])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
mdf3 = from_pandas(df3, chunk_size=3)
mdf4 = from_pandas(df4, chunk_size=2)
# Note [Index of Merge]
#
# When `left_index` and `right_index` of `merge` is both false, pandas will generate an RangeIndex to
# the final result dataframe.
#
# We chunked the `left` and `right` dataframe, thus every result chunk will have its own RangeIndex.
# When they are contenated we don't generate a new RangeIndex for the result, thus we cannot obtain the
# same index value with pandas. But we guarantee that the content of dataframe is correct.
# merge on index
expected0 = df1.merge(df2)
jdf0 = mdf1.merge(mdf2)
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
# merge on left index and `right_on`
expected1 = df1.merge(df2, how='left', right_on='x', left_index=True)
jdf1 = mdf1.merge(mdf2, how='left', right_on='x', left_index=True)
result1 = jdf1.execute().fetch()
expected1.set_index('a_x', inplace=True)
result1.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1, 0), sort_dataframe_inplace(result1, 0))
# merge on `left_on` and right index
expected2 = df1.merge(df2, how='right', left_on='a', right_index=True)
jdf2 = mdf1.merge(mdf2, how='right', left_on='a', right_index=True)
result2 = jdf2.execute().fetch()
expected2.set_index('a', inplace=True)
result2.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
# merge on `left_on` and `right_on`
expected3 = df1.merge(df2, how='left', left_on='a', right_on='x')
jdf3 = mdf1.merge(mdf2, how='left', left_on='a', right_on='x')
result3 = jdf3.execute().fetch()
expected3.set_index('a_x', inplace=True)
result3.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
# merge on `on`
expected4 = df1.merge(df2, how='right', on='a')
jdf4 = mdf1.merge(mdf2, how='right', on='a')
result4 = jdf4.execute().fetch()
expected4.set_index('a', inplace=True)
result4.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
# merge on multiple columns
expected5 = df1.merge(df2, how='inner', on=['a', 'b'])
jdf5 = mdf1.merge(mdf2, how='inner', on=['a', 'b'])
result5 = jdf5.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected5, 0), sort_dataframe_inplace(result5, 0))
# merge when some on is index
expected6 = df3.merge(df2, how='inner', left_on='index', right_on='a')
jdf6 = mdf3.merge(mdf2, how='inner', left_on='index', right_on='a')
result6 = jdf6.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected6, 0), sort_dataframe_inplace(result6, 0))
# merge when on is in MultiIndex
expected7 = df4.merge(df2, how='inner', left_on='i1', right_on='a')
jdf7 = mdf4.merge(mdf2, how='inner', left_on='i1', right_on='a')
result7 = jdf7.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected7, 0), sort_dataframe_inplace(result7, 0))
# merge when on is in MultiIndex, and on not in index
expected8 = df4.merge(df2, how='inner', on=['a', 'b'])
jdf8 = mdf4.merge(mdf2, how='inner', on=['a', 'b'])
result8 = jdf8.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected8, 0), sort_dataframe_inplace(result8, 0))
def test_join(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], index=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
# default `how`
expected0 = df1.join(df2, lsuffix='l_', rsuffix='r_')
jdf0 = mdf1.join(mdf2, lsuffix='l_', rsuffix='r_')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(expected0.sort_index(), result0.sort_index())
# how = 'left'
expected1 = df1.join(df2, how='left', lsuffix='l_', rsuffix='r_')
jdf1 = mdf1.join(mdf2, how='left', lsuffix='l_', rsuffix='r_')
result1 = jdf1.execute().fetch()
pd.testing.assert_frame_equal(expected1.sort_index(), result1.sort_index())
# how = 'right'
expected2 = df1.join(df2, how='right', lsuffix='l_', rsuffix='r_')
jdf2 = mdf1.join(mdf2, how='right', lsuffix='l_', rsuffix='r_')
result2 = jdf2.execute().fetch()
pd.testing.assert_frame_equal(expected2.sort_index(), result2.sort_index())
# how = 'inner'
expected3 = df1.join(df2, how='inner', lsuffix='l_', rsuffix='r_')
jdf3 = mdf1.join(mdf2, how='inner', lsuffix='l_', rsuffix='r_')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(expected3.sort_index(), result3.sort_index())
# how = 'outer'
expected4 = df1.join(df2, how='outer', lsuffix='l_', rsuffix='r_')
jdf4 = mdf1.join(mdf2, how='outer', lsuffix='l_', rsuffix='r_')
result4 = jdf4.execute().fetch()
pd.testing.assert_frame_equal(expected4.sort_index(), result4.sort_index())
def test_join_on(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], columns=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], columns=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
expected0 = df1.join(df2, on=None, lsuffix='_l', rsuffix='_r')
jdf0 = mdf1.join(mdf2, on=None, lsuffix='_l', rsuffix='_r')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
expected1 = df1.join(df2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
jdf1 = mdf1.join(mdf2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
result1 = jdf1.execute().fetch()
# Note [Columns of Left Join]
#
# I believe we have no chance to obtain the entirely same result with pandas here:
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 0 1 3 3
# >>> df2
# a1 b2 b3
# 1 2 6 7
# >>> df3
# a1 b2 b3
# 1 2 6 7
# 1 2 6 7
#
# >>> df1.merge(df2, how='left', left_on='a1', left_index=False, right_index=True)
# a1_x a2 a3 a1_y b2 b3
# 0 1 3 3 2 6 7
# >>> df1.merge(df3, how='left', left_on='a1', left_index=False, right_index=True)
# a1 a1_x a2 a3 a1_y b2 b3
# 0 1 1 3 3 2 6 7
# 0 1 1 3 3 2 6 7
#
# Note that the result of `df1.merge(df3)` has an extra column `a` compared to `df1.merge(df2)`.
# The value of column `a` is the same of `a1_x`, just because `1` occurs twice in index of `df3`.
# I haven't invistagated why pandas has such behaviour...
#
# We cannot yield the same result with pandas, because, the `df3` is chunked, then some of the
# result chunk has 6 columns, others may have 7 columns, when concatenated into one DataFrame
# some cells of column `a` will have value `NaN`, which is different from the result of pandas.
#
# But we can guarantee that other effective columns have absolutely same value with pandas.
columns_to_compare = jdf1.columns_value.to_pandas()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1[columns_to_compare], 0, 1),
sort_dataframe_inplace(result1[columns_to_compare], 0, 1))
# Note [Index of Join on EmptyDataFrame]
#
# It is tricky that it is non-trivial to get the same `index` result with pandas.
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 1 4 2 6
# >>> df2
# a1 b2 b3
# 1 2 6 7
# 2 8 9 10
# >>> df3
# Empty DataFrame
# Columns: [a1, a2, a3]
# Index: []
# >>> df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1.0 4.0 2 6.0 8 9 10
# NaN NaN 1 NaN 2 6 7
# >>> df3.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1 NaN 1 NaN 2 6 7
# 2 NaN 2 NaN 8 9 10
#
# When the `left` dataframe is not empty, the mismatched rows in `right` will have index value `NaN`,
# and the matched rows have index value from `right`. When the `left` dataframe is empty, the mismatched
# rows have index value from `right`.
#
# Since we chunked the `left` dataframe, it is uneasy to obtain the same index value with pandas in the
# final result dataframe, but we guaranteed that the dataframe content is correctly.
expected2 = df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
jdf2 = mdf1.join(mdf2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
result2 = jdf2.execute().fetch()
expected2.set_index('a2', inplace=True)
result2.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
expected3 = df1.join(df2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
jdf3 = mdf1.join(mdf2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
expected4 = df1.join(df2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
jdf4 = mdf1.join(mdf2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
result4 = jdf4.execute().fetch()
expected4.set_index('a2', inplace=True)
result4.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
def test_merge_one_chunk(setup):
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]}, index=['a1', 'a2', 'a3', 'a4'])
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
# all have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# left have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2, chunk_size=2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# right have one chunk
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
def test_merge_on_duplicate_columns(setup):
raw1 = pd.DataFrame([['foo', 1, 'bar'],
['bar', 2, 'foo'],
['baz', 3, 'foo']],
columns=['lkey', 'value', 'value'],
index=['a1', 'a2', 'a3'])
raw2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
df1 = from_pandas(raw1, chunk_size=2)
df2 = from_pandas(raw2, chunk_size=3)
r = df1.merge(df2, left_on='lkey', right_on='rkey')
result = r.execute().fetch()
expected = raw1.merge(raw2, left_on='lkey', right_on='rkey')
pd.testing.assert_frame_equal(expected, result)
def test_append_execution(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=2)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
df3 = pd.DataFrame(np.random.rand(8, 4), columns=list('ABCD'))
mdf3 = from_pandas(df3, chunk_size=3)
expected = df1.append([df2, df3])
adf = mdf1.append([mdf2, mdf3])
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
expected = df1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
# test for series
series1 = pd.Series(np.random.rand(10,))
series2 = pd.Series(np.random.rand(10,))
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
aseries = mseries1.append(mseries2)
expected = series1.append(series2)
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
aseries = mseries1.append(mseries2, ignore_index=True)
expected = series1.append(series2, ignore_index=True)
result = aseries.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_series_equal(expected, result)
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=2)
aseries = mseries1.append(mseries2)
expected = series1.append(series2)
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
aseries = mseries1.append(mseries2, ignore_index=True)
expected = series1.append(series2, ignore_index=True)
result = aseries.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_series_equal(expected, result)
series3 = pd.Series(np.random.rand(4,))
mseries3 = series_from_pandas(series3, chunk_size=2)
expected = series1.append([series2, series3])
aseries = mseries1.append([mseries2, mseries3])
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
def test_concat(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2])
expected = pd.concat([df1, df2])
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test different chunk size and ignore_index=True
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2], ignore_index=True)
expected = pd.concat([df1, df2], ignore_index=True)
result = r.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
# test axis=1
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2], axis=1)
expected = pd.concat([df1, df2], axis=1)
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test multiply dataframes
r = concat([mdf1, mdf2, mdf1])
expected = pd.concat([df1, df2, df1])
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 3), columns=list('ABC'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
# test join=inner
r = concat([mdf1, mdf2], join='inner')
expected = pd.concat([df1, df2], join='inner')
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test for series
series1 = pd.Series(np.random.rand(10,))
series2 = pd.Series(np.random.rand(10,))
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
r = concat([mseries1, mseries2])
expected = pd.concat([series1, series2])
result = r.execute().fetch()
pd.testing.assert_series_equal(result, expected)
# test different series and ignore_index
mseries1 = series_from_pandas(series1, chunk_size=4)
mseries2 = series_from_pandas(series2, chunk_size=3)
r = concat([mseries1, mseries2], ignore_index=True)
expected = pd.concat([series1, series2], ignore_index=True)
result = r.execute(extra_config={'check_index_value': False}).fetch()
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
#OUTDATED
#Splices exons from genes together to construct a region approximating a gene
#Tracks process time
#pc_gene_eo.csv generated by awk filtering of mouse gene annoatation for protein_coding, exon_number, chr, with # lines removed
#pc_ids_eo.csv generated by awk filtering of mouse gene annoatation for protein_coding, exon_number, chr, with # lines removed for column $9
#uq_pc_ids_eo.csv generated by awk filtering of pc_ids_eo.csv for unique lines
#Exports constructed_genes.csv
import time
import pandas as pd
def duplicates(list, item):
"""Returns index locations of item in list"""
return [i for i, x in enumerate(list) if x == item]
panda_import_time = time.process_time()
print("pandas imported in " + str(panda_import_time))
#load gene data
gene_data_df = | pd.read_csv("data/mm10_data/pc_genes_eo.csv", header=None, index_col=False) | pandas.read_csv |
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Example:
Create directories::
mkdir -p test-data/input
mkdir -p test-data/output
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_files import *
from d6tstack.combine_csv import *
from d6tstack.combine_xls import *
import pandas as pd
import ntpath
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
#************************************************************
# fixtures
#************************************************************
class TestLogPusher(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = TestLogPusher('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
def write_file(dfg,fname):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer,'Sheet1',index=False)
dfg.to_excel(writer,'Sheet2',index=False)
writer.save()
df1,df2,df3 = create_files_df_clean()
write_file(df1,cfg_fname % 'jan')
write_file(df2,cfg_fname % 'feb')
write_file(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# combine_csv
#************************************************************
#************************************************************
def test_csv_sniff_single(create_files_csv, create_files_csv_noheader):
sniff = CSVSniffer(create_files_csv[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
fname = create_files_csv_dirty("|")
sniff = CSVSniffer(fname)
sniff.get_delim()
assert sniff.delim == "|"
assert sniff.has_header()
df1,df2,df3 = create_files_df_clean()
assert sniff.nrows == df1.shape[0]+1
# no header test
sniff = CSVSniffer(create_files_csv_noheader[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_csv_sniff_multi(create_files_csv, create_files_csv_noheader):
sniff = CSVSnifferList(create_files_csv)
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
# no header test
sniff = CSVSnifferList(create_files_csv_noheader)
sniff.get_delim()
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_CombinerCSV_columns(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'sales']
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert not col_preview['is_all_equal']
assert not col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'profit2', 'sales']
assert col_preview['columns_common']==['cost', 'date', 'profit', 'sales']
assert col_preview['columns_unique']==['profit2']
fname_list = create_files_csv_colreorder
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
assert not col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
def test_CombinerCSV_combine(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
# all columns present
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
df = combiner.combine()
df = df.groupby('filename').head(combiner.nrows_preview)
df_chk = combiner.preview_combine()
assert df.equals(df_chk)
# columns mismatch, all columns
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=False)
assert df.shape[1] == df_chk.shape[1]
# columns mismatch, common columns
df = combiner.combine(is_col_common=True)
df = df.sort_values('date').drop(['filename'], axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=True)
assert df.shape[1] == df_chk.shape[1]
def test_CombinerCSVAdvanced_combine(create_files_csv):
# Check if rename worked correctly.
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
adv_combiner = CombinerCSVAdvanced(combiner, cfg_col_sel=None, cfg_col_rename={'date':'date1'})
df = adv_combiner.combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
df = adv_combiner.preview_combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
adv_combiner = CombinerCSVAdvanced(combiner, cfg_col_sel=['cost', 'date', 'profit', 'profit2', 'sales'])
df = adv_combiner.combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
df = adv_combiner.preview_combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
#************************************************************
# combine_xls
#************************************************************
def test_xls_scan_sheets_single(create_files_xls_single,create_files_xlsx_single):
def helper(fnames):
xlsSniffer = XLSSniffer(fnames)
sheets = xlsSniffer.dict_xls_sheets
assert np.all([file['sheets_names']==['Sheet1'] for file in sheets.values()])
assert np.all([file['sheets_count']==1 for file in sheets.values()])
assert xlsSniffer.all_same_count()
assert xlsSniffer.all_same_names()
assert xlsSniffer.all_contain_sheetname('Sheet1')
assert xlsSniffer.all_have_idx(0)
assert not xlsSniffer.all_have_idx(1)
helper(create_files_xls_single)
helper(create_files_xlsx_single)
def test_xls_scan_sheets_multipe(create_files_xls_multiple,create_files_xlsx_multiple):
def helper(fnames):
xlsSniffer = XLSSniffer(fnames)
sheets = xlsSniffer.dict_xls_sheets
assert np.all([file['sheets_names']==['Sheet1', 'Sheet2'] for file in sheets.values()])
assert np.all([file['sheets_count']==2 for file in sheets.values()])
helper(create_files_xls_multiple)
helper(create_files_xlsx_multiple)
#todo: wrong file raises exception NotImplementedError
#************************************************************
# tests - ui
#************************************************************
def test_combine_csv(create_files_csv):
r = combine_files(create_files_csv, '', logger, cfg_return_df=True)
assert r['status']=='complete'
df = r['data']
df2 = r['data'].copy().reset_index(drop=True)
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
r = combine_files(create_files_csv, cfg_fname_base_out_dir, logger, cfg_return_df=False)
assert r['status'] == 'complete'
df = pd.read_csv(cfg_fname_base_out_dir+'/combined.csv', dtype=str)
assert sorted(df.columns)==sorted(df2.columns)
assert df.equals(df2[df.columns])
df_sample = | pd.read_csv(cfg_fname_base_out_dir+'/combined-sample.csv',dtype=str) | pandas.read_csv |
# pylint: disable=W0612,E1101
from datetime import datetime
import os
import operator
import unittest
import numpy as np
from pandas.core.api import DataFrame, Index, notnull
from pandas.core.datetools import bday
from pandas.core.frame import group_agg
from pandas.core.panel import WidePanel, LongPanel, pivot
import pandas.core.panel as panelmod
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.core.panel as panelm
import pandas.util.testing as common
class PanelTests(object):
panel = None
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
class SafeForLongAndSparse(object):
def test_repr(self):
foo = repr(self.panel)
def test_iter(self):
common.equalContents(list(self.panel), self.panel.items)
def _check_statistic(self, frame, name, alternative):
f = getattr(frame, name)
for i, ax in enumerate(['items', 'major', 'minor']):
result = f(axis=i)
assert_frame_equal(result, frame.apply(alternative, axis=ax))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.panel, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.sum()
self._check_statistic(self.panel, 'sum', f)
def test_prod(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return np.prod(nona)
self._check_statistic(self.panel, 'prod', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.panel, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.panel, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.min()
self._check_statistic(self.panel, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.max()
self._check_statistic(self.panel, 'max', f)
def test_var(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.var(ddof=1)
self._check_statistic(self.panel, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.std(ddof=1)
self._check_statistic(self.panel, 'std', f)
def test_skew(self):
return
try:
from scipy.stats import skew
except ImportError:
return
def f(x):
x = np.asarray(x)
return skew(x[notnull(x)], bias=False)
self._check_statistic(self.panel, 'skew', f)
class SafeForSparse(object):
@staticmethod
def assert_panel_equal(x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert(self.panel._get_axis(0) is self.panel.items)
assert(self.panel._get_axis(1) is self.panel.major_axis)
assert(self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
self.panel.items = new_items
self.assert_(self.panel.items is new_items)
self.panel.major_axis = new_major
self.assert_(self.panel.major_axis is new_major)
self.panel.minor_axis = new_minor
self.assert_(self.panel.minor_axis is new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.div)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
common.equalContents(self.panel.keys(), self.panel.items)
def test_iteritems(self):
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
check_op(operator.add, 'add')
check_op(operator.sub, 'subtract')
check_op(operator.mul, 'multiply')
check_op(operator.div, 'divide')
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
class TestWidePanel(unittest.TestCase, PanelTests,
SafeForLongAndSparse,
SafeForSparse):
@staticmethod
def assert_panel_equal(x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = common.makeWidePanel()
common.add_nans(self.panel)
def test_constructor(self):
# with BlockManager
wp = WidePanel(self.panel._data)
self.assert_(wp._data is self.panel._data)
wp = WidePanel(self.panel._data, copy=True)
self.assert_(wp._data is not self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled prop
wp = WidePanel([[['foo', 'foo', 'foo',],
['foo', 'foo', 'foo']]])
self.assert_(wp.values.dtype == np.object_)
vals = self.panel.values
# no copy
wp = WidePanel(vals)
self.assert_(wp.values is vals)
# copy
wp = WidePanel(vals, copy=True)
self.assert_(wp.values is not vals)
def test_constructor_cast(self):
casted = WidePanel(self.panel._data, dtype=int)
casted2 = WidePanel(self.panel.values, dtype=int)
exp_values = self.panel.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [['foo', 'bar', 'baz']]
self.assertRaises(ValueError, DataFrame, data, dtype=float)
def test_consolidate(self):
self.assert_(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assert_(not self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assert_(panel._data.is_consolidated())
def test_from_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A' : itema, 'B' : itemb[5:]}
d2 = {'A' : itema._series, 'B' : itemb[5:]._series}
d3 = {'A' : DataFrame(itema._series),
'B' : DataFrame(itemb[5:]._series)}
wp = WidePanel.from_dict(d)
wp2 = WidePanel.from_dict(d2) # nested Dict
wp3 = WidePanel.from_dict(d3)
self.assert_(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = WidePanel.from_dict(d, intersect=True)
self.assert_(wp.major_axis.equals(itemb.index[5:]))
def test_from_dict_mixed(self):
pass
def test_values(self):
self.assertRaises(Exception, WidePanel, np.random.randn(5, 5, 5),
range(5), range(5), range(4))
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assert_('ItemA' not in self.panel.items)
del self.panel['ItemB']
self.assert_('ItemB' not in self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = WidePanel(values, range(3), range(3), range(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA']).to_long()
self.panel['ItemE'] = lp
lp = self.panel.filter(['ItemA', 'ItemB']).to_long()
self.assertRaises(Exception, self.panel.__setitem__,
'ItemE', lp)
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index,
columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = 1
# object dtype
self.panel['ItemQ'] = 'foo'
self.assert_(self.panel['ItemQ'].values.dtype == np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assert_(self.panel['ItemP'].values.dtype == np.bool_)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert(conformed.index.equals(self.panel.major_axis))
assert(conformed.columns.equals(self.panel.minor_axis))
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis)
assert(result.items is self.panel.items)
assert(result.major_axis is self.panel.major_axis)
assert(result.minor_axis is self.panel.minor_axis)
self.assertRaises(Exception, self.panel.reindex)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis,
method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assert_(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
def test_combinePanel_with_long(self):
lng = self.panel.to_long(filter_observations=False)
result = self.panel.add(lng)
self.assert_panel_equal(result, self.panel * 2)
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
assert_series_equal(xs['ItemA'], ref.xs(idx))
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assert_(xs['ItemA'].dtype == np.float64)
self.assert_(xs['ItemD'].dtype == np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx])
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assert_(xs['ItemA'].dtype == np.float64)
self.assert_(xs['ItemD'].dtype == np.object_)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assert_(result.items is self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assert_(result.items is self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assert_(result.major_axis is self.panel.minor_axis)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assert_(result.items is self.panel.major_axis)
# this should also work
self.assertRaises(Exception, self.panel.swapaxes, 'items', 'items')
def test_to_long(self):
# filtered
filtered = self.panel.to_long()
# unfiltered
unfiltered = self.panel.to_long(filter_observations=False)
assert_panel_equal(unfiltered.to_wide(), self.panel)
def test_to_long_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_long()
wp = lp.to_wide()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
assert_frame_equal(wp['bool'], panel['bool'])
def test_filter(self):
pass
def test_apply(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx),
shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx),
shifted.minor_xs(idx_lag))
self.assertRaises(Exception, self.panel.shift, 1, axis='items')
class TestLongPanel(unittest.TestCase):
def setUp(self):
panel = common.makeWidePanel()
common.add_nans(panel)
self.panel = panel.to_long()
self.unfiltered_panel = panel.to_long(filter_observations=False)
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_almost_equal(unpickled['ItemA'].values,
self.panel['ItemA'].values)
def test_len(self):
len(self.unfiltered_panel)
def test_constructor(self):
pass
def test_fromRecords_toRecords(self):
# structured array
K = 10
recs = np.zeros(K, dtype='O,O,f8,f8')
recs['f0'] = range(K / 2) * 2
recs['f1'] = np.arange(K) / (K / 2)
recs['f2'] = np.arange(K) * 2
recs['f3'] = np.arange(K)
lp = LongPanel.fromRecords(recs, 'f0', 'f1')
self.assertEqual(len(lp.items), 2)
lp = LongPanel.fromRecords(recs, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
torecs = lp.toRecords()
self.assertEqual(len(torecs.dtype.names), len(lp.items) + 2)
# DataFrame
df = DataFrame.from_records(recs)
lp = LongPanel.fromRecords(df, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
# dict of arrays
series = DataFrame.from_records(recs)._series
lp = LongPanel.fromRecords(series, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
self.assert_('f2' in series)
self.assertRaises(Exception, LongPanel.fromRecords, np.zeros((3, 3)),
0, 1)
def test_factors(self):
# structured array
K = 10
recs = np.zeros(K, dtype='O,O,f8,f8,O,O')
recs['f0'] = ['one'] * 5 + ['two'] * 5
recs['f1'] = ['A', 'B', 'C', 'D', 'E'] * 2
recs['f2'] = np.arange(K) * 2
recs['f3'] = np.arange(K)
recs['f4'] = ['A', 'B', 'C', 'D', 'E'] * 2
recs['f5'] = ['foo', 'bar'] * 5
lp = LongPanel.fromRecords(recs, 'f0', 'f1')
def test_columns(self):
self.assert_(np.array_equal(self.panel.items, self.panel.columns))
def test_copy(self):
thecopy = self.panel.copy()
self.assert_(np.array_equal(thecopy.values, self.panel.values))
self.assert_(thecopy.values is not self.panel.values)
def test_getitem(self):
col = self.panel['ItemA']
def test_setitem(self):
self.panel['ItemE'] = self.panel['ItemA']
self.panel['ItemF'] = 1.
wp = self.panel.to_wide()
assert_frame_equal(wp['ItemA'], wp['ItemE'])
itemf = wp['ItemF'].values.ravel()
self.assert_((itemf[np.isfinite(itemf)] == 1).all())
# check exceptions raised
lp = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC', 'ItemE'])
self.assertRaises(Exception, lp.__setitem__, 'foo', lp2)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_wide()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_long()
self.assertRaises(Exception, self.panel.__setitem__, 'foo',
lp2.filter(['ItemA']))
self.assertRaises(Exception, self.panel.add, lp2)
def test_combineFrame(self):
wp = self.panel.to_wide()
result = self.panel.add(wp['ItemA'])
assert_frame_equal(result.to_wide()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_wide()
result = self.panel.add(self.panel)
wide_result = result.to_wide()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_operators(self):
wp = self.panel.to_wide()
result = (self.panel + 1).to_wide()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_sort(self):
def is_sorted(arr):
return (arr[1:] > arr[:-1]).any()
sorted_minor = self.panel.sortlevel(level=1)
self.assert_(is_sorted(sorted_minor.minor_labels))
sorted_major = sorted_minor.sortlevel(level=0)
self.assert_(is_sorted(sorted_major.major_labels))
def test_to_wide(self):
pass
def test_toCSV(self):
self.panel.toCSV('__tmp__')
os.remove('__tmp__')
def test_toString(self):
from cStringIO import StringIO
buf = StringIO()
self.panel.toString(buf)
def test_swapaxes(self):
swapped = self.panel.swapaxes()
self.assert_(swapped.major_axis is self.panel.minor_axis)
# what else to test here?
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_wide()
expected = self.panel.to_wide()['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start).to_wide()
expected = self.panel.to_wide()['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end).to_wide()
expected = self.panel.to_wide()['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# truncate on dates that aren't in there
wp = self.panel.to_wide()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_long()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_wide())
# throw proper exception
self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_filter(self):
pass
def test_axis_dummies(self):
minor_dummies = self.panel.get_axis_dummies('minor')
self.assertEqual(len(minor_dummies.items),
len(self.panel.minor_axis))
major_dummies = self.panel.get_axis_dummies('major')
self.assertEqual(len(major_dummies.items),
len(self.panel.major_axis))
mapping = {'A' : 'one',
'B' : 'one',
'C' : 'two',
'D' : 'two'}
transformed = self.panel.get_axis_dummies('minor',
transform=mapping.get)
self.assertEqual(len(transformed.items), 2)
self.assert_(np.array_equal(transformed.items, ['one', 'two']))
# TODO: test correctness
def test_get_dummies(self):
self.panel['Label'] = self.panel.minor_labels
minor_dummies = self.panel.get_axis_dummies('minor')
dummies = self.panel.get_dummies('Label')
self.assert_(np.array_equal(dummies.values, minor_dummies.values))
def test_apply(self):
# ufunc
applied = self.panel.apply(np.sqrt)
self.assert_(assert_almost_equal(applied.values,
np.sqrt(self.panel.values)))
def test_mean(self):
means = self.panel.mean('major')
# test versus WidePanel version
wide_means = self.panel.to_wide().mean('major')
assert_frame_equal(means, wide_means)
means_broadcast = self.panel.mean('major', broadcast=True)
self.assert_(isinstance(means_broadcast, LongPanel))
# how to check correctness?
def test_sum(self):
sums = self.panel.sum('major')
# test versus WidePanel version
wide_sums = self.panel.to_wide().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
self.assertEqual(major_count[i], (labels == i).sum())
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
self.assertEqual(minor_count[i], (labels == i).sum())
def test_join(self):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
self.assertEqual(len(joined.items), 3)
self.assertRaises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_merge(self):
pass
def test_addPrefix(self):
lp = self.panel.addPrefix('foo#')
self.assertEqual(lp.items[0], 'foo#ItemA')
lp = self.panel.addPrefix()
assert_panel_equal(lp.to_wide(), self.panel.to_wide())
def test_pivot(self):
df = pivot(np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
self.assertEqual(df['a'][1], 1)
self.assertEqual(df['b'][2], 2)
self.assertEqual(df['c'][3], 3)
self.assertEqual(df['d'][4], 5)
self.assertEqual(df['e'][5], 4)
# weird overlap, TODO: test?
df = pivot(np.array([1, 2, 3, 4, 4]),
np.array(['a', 'a', 'a', 'a', 'a']),
np.array([1, 2, 3, 5, 4]))
# corner case, empty
df = pivot(np.array([]), np.array([]), np.array([]))
def test_group_agg():
values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))
bounds = np.arange(5) * 2
f = lambda x: x.mean(axis=0)
agged = group_agg(values, bounds, f)
assert(agged[1][0] == 2.5)
assert(agged[2][0] == 4.5)
def test_monotonic():
pos = np.array([1, 2, 3, 5])
assert panelm._monotonic(pos)
neg = np.array([1, 2, 3, 4, 3])
assert not panelm._monotonic(neg)
neg2 = np.array([5, 1, 2, 3, 4, 5])
assert not | panelm._monotonic(neg2) | pandas.core.panel._monotonic |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates a synthetic signal for testing ARMA operations
Specific for this case, creates a time-dependent noise in addtion to Fourier signals
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def fourier(freq, a, b, x):
"""
evaluates Fourier expression for a given frequency, amplitudes, and time series
@ In, freq, float, frequency to evaluate
@ In, a, float, sine coefficient
@ In, b, float, cosine coefficient
@ In, x, np.ndarray(float), independent parameter at which to evaluate
@ Out, fourier, Fourier signal given prescribed parameters
"""
sig = 2.0 * np.pi * freq * x
return a * np.sin(sig) + b * np.cos(sig)
def generate(fname, fs, noiseScale):
# script options
## number of elements in history
N = 1000
m = 1000 // 10
## noise scale
#noiseScale = 1.0
## make plots?
plot = True
# PLAN: A B A B C A A B B C
## Fourier Full: (1/100, 1, 1, t)
## Fourier A: (1/5, 2, 0, t)
## Fourier B: (1/5, 0, 2, t)
## Fourier C: (1/3, 2, 2, t)
plan = ['A']*m + ['B']*m + ['A']*m + ['B']*m + ['C']*m + \
['A']*m + ['A']*m + ['B']*m + ['B']*m + ['C']*m
plan = np.array(plan)
maskA = plan == 'A'
maskB = plan == 'B'
maskC = plan == 'C'
## Fourier
t = np.linspace(0, 100, N)
signal = np.zeros(N)
signal += fourier(fs[0][0], fs[0][1], fs[0][2], t)
signal[maskA] += fourier(fs[1][0], fs[1][1], fs[1][2], t[maskA])
signal[maskB] += fourier(fs[2][0], fs[2][1], fs[2][2], t[maskB])
signal[maskC] += fourier(fs[3][0], fs[3][1], fs[3][2], t[maskC])
if plot:
fig, ax = plt.subplots(figsize=(12, 10))
ax.plot(t, signal, '.-', label='Fourier')
# add some random noise
## pure noise
noise = np.random.rand(N) * noiseScale
## time-dependence
#noise *= fourier(1./5., 1.0, 0.0, t)**2
signal += noise
if plot:
ax.plot(t, noise, '.-', label='Noise')
ax.plot(t, signal, '.-', label='Full')
if plot:
ax.set_title('Signal Construction')
ax.legend(loc=0)
ax.set_ylabel('Value')
ax.set_xlabel('Time')
idx = | pd.Index(t, name='Time') | pandas.Index |
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import average_precision_score, accuracy_score, precision_recall_curve, roc_auc_score, r2_score
from analyses.plot.plot_utils import plot_confusion_matrix, plot_pr_curve
from analyses.classification.subcompartments import Subcompartments
from sklearn.utils import resample
from training.config import Config
from training.data_utils import get_cumpos
from sklearn.preprocessing import LabelEncoder
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
class DownstreamHelper:
"""
Helper class to run classification experiments.
Includes converting to cumulative indices, merging data with features, augmenting negative labels,
computing classification metrics, custom metrics functions, subc and pca baselines,
methods to fix class imbalance, and mlp regressor.
"""
def __init__(self, cfg):
self.cfg = cfg
self.cell = cfg.cell
self.start_ends = np.load(cfg.hic_path + cfg.start_end_file, allow_pickle=True).item()
self.feature_columns = [str(i) for i in range(0, 16)]
self.chr_len = cfg.genome_len
self.num_subc = 5
self.embed_rows = None
self.pred_rows = None
self.start, self.stop = None, None
def add_cum_pos(self, frame, chr, mode):
"""
add_cum_pos(frame, chr, mode) -> Dataframe
Converts positions to cumulative indices.
Args:
frame (Dataframe): includes position data with respect to chromosome.
chr (int): Current chromosome.
mode (string): one of ends or pos.
"""
"gets cumulative positions"
cum_pos = get_cumpos(self.cfg, chr)
"adds cumpos depending on mode"
if mode == "ends":
pos_columns = ["start", "end"]
elif mode == "pos":
pos_columns = ["pos"]
frame[pos_columns] += cum_pos
return frame
def get_pos_data(self, window_labels, chr):
"""
get_pos_data(window_labels, chr) -> Dataframe
Converts start end type of data to pos data.
Args:
window_labels (Dataframe): includes positions and targets.
chr (int): Current chromosome.
"""
"gets start and end of available data"
start = self.start_ends["chr" + str(chr)]["start"] + get_cumpos(self.cfg, chr)
stop = self.start_ends["chr" + str(chr)]["stop"] + get_cumpos(self.cfg, chr)
"filter"
rna_window_labels = window_labels.loc[
(window_labels["start"] > start) & (window_labels["start"] < stop)].reset_index()
rna_window_labels = rna_window_labels.reset_index(drop=True)
"convert start end to pos"
functional_data = self.get_window_data(rna_window_labels)
return functional_data
def merge_features_target(self, embed_rows, functional_data):
"""
merge_features_target(embed_rows, functional_data) -> Dataframe
Merges representations and task data based on position.
Args:
embed_rows (Dataframe): Representations.
functional_data (Dataframe): includes positions and targets.
"""
"merge representations and task data"
feature_matrix = pd.merge(embed_rows, functional_data, on="pos")
feature_matrix = feature_matrix[(feature_matrix[self.feature_columns] != 0).all(axis=1)]
feature_matrix = feature_matrix.loc[(feature_matrix["target"].isna() != True)]
return feature_matrix
def get_feature_matrix(self, embed_rows, functional_data, chr, mode="ends"):
"""
get_feature_matrix(embed_rows, functional_data, chr, mode) -> Dataframe
Converts task data to pos data and merges with representations.
Args:
embed_rows (Dataframe): Representations.
functional_data (Dataframe): includes positions and targets.
chr (int): The current chromosome
mode (string): one of ends or pos
"""
"gets pos data if ends"
if mode == "ends":
functional_data = self.get_pos_data(functional_data, chr)
"merges representations with task data"
feature_matrix = self.merge_features_target(embed_rows, functional_data)
return feature_matrix
def get_window_data(self, frame):
"""
get_window_data(frame) -> Dataframe
Converts start end data to pos data.
Args:
frame (Dataframe): Start end data.
"""
functional_data = pd.DataFrame(columns=["pos", "target"])
if frame.index[0] == 1:
frame.index -= 1
for i in range(0, frame.shape[0]):
start = frame.loc[i, "start"]
end = frame.loc[i, "end"]
for j in range(start, end + 1):
functional_data = functional_data.append({'pos': j, 'target': frame.loc[i, "target"]},
ignore_index=True)
return functional_data
def get_preds_multi(self, y_hat, y_test):
"""
get_preds_multi(y_hat, y_test) -> Dataframe
gets predicted class by doing argmax.
Args:
y_hat (Array): array of class probabilities
y_test (Array): true test class
"""
pred_data = pd.DataFrame(y_hat)
pred_data['max'] = pred_data.idxmax(axis=1)
pred_data["target"] = np.array(y_test)
pred_data["target"] = pred_data["target"].astype(int)
return pred_data
def get_zero_pos(self, window_labels, col_list, chr):
"""
get_zero_pos(window_labels, col_list, chr) -> Dataframe
Gets negative labels for classification.
Args:
window_labels (Dataframe): array of class probabilities
col_list (list): Contains column names
chr (int): current chromosome
"""
ind_list = []
max_len = self.start_ends["chr" + str(chr)]["stop"]
mask_vec = np.zeros(max_len, bool)
n_run = len(col_list) // 2
if col_list[0] != "pos":
"for start end, keeps track to avoid positions within window."
for i in range(window_labels.shape[0]):
count = 0
for j in range(n_run):
start = window_labels.loc[i, col_list[count]]
count += 1
end = window_labels.loc[i, col_list[count]]
count += 1
if start >= max_len or end >= max_len:
break
for k in range(start, end + 1):
ind_list.append(k)
ind_ar = np.array(ind_list)
else:
"for pos, keeps track to avoid positive positions"
ind_ar = np.array(window_labels["pos"])
"masking"
mask_vec[ind_ar] = True
zero_vec = np.invert(mask_vec)
zero_ind = np.nonzero(zero_vec)
"class balancing"
if window_labels.shape[0] <= len(zero_ind[0]):
zero_ind = zero_ind[0][:window_labels.shape[0]]
"create dataframe"
zero_frame = pd.DataFrame(np.transpose(zero_ind), columns=['pos'])
zero_frame["target"] = pd.Series(np.zeros(len(zero_frame))).astype(int)
return zero_frame
def precision_function(self, pred_data, num_classes):
"""
precision_function(pred_data, num_classes) -> float, float
Custom precision function for multiclass classification. Computes mAP and fscore.
Args:
pred_data (Dataframe): dataframe with true and predicted classes
num_classes (int): Total number of classes
"""
"initialize"
ap_list = []
fscore_list = []
rec_levels = np.linspace(0, 1, num=11)
"average for all classes"
for cls in range(num_classes):
"get ranked probs"
ranked_prob = np.array(pred_data.loc[:, cls]).argsort()[
::-1]
max_cls = pred_data.iloc[ranked_prob]["max"]
target_cls = pred_data.iloc[ranked_prob]["target"]
perf = pd.DataFrame(columns=["TP", "FP", "FN", "P", "R"])
"populate TP, FP, FN, P, and R"
for r in range(pred_data.shape[0]):
if max_cls[r] == cls and target_cls[r] == cls:
perf.loc[r, "TP"] = 1
perf.loc[r, "FN"] = 0
perf.loc[r, "FP"] = 0
elif max_cls[r] != cls and target_cls[r] == cls:
perf.loc[r, "FN"] = 1
perf.loc[r, "TP"] = 0
perf.loc[r, "FP"] = 0
elif max_cls[r] == cls and target_cls[r] != cls:
perf.loc[r, "FP"] = 1
perf.loc[r, "TP"] = 0
perf.loc[r, "FN"] = 0
elif max_cls[r] != cls and target_cls[r] != cls:
perf.loc[r, "FP"] = 0
perf.loc[r, "TP"] = 0
perf.loc[r, "FN"] = 0
perf.loc[r, "R"] = 0
perf.loc[r, "P"] = 0
continue
TP = (perf.iloc[:r + 1]["TP"]).sum()
FP = (perf.iloc[:r + 1]["FP"]).sum()
FN = (perf.iloc[:r + 1]["FN"]).sum()
if (TP + FP) != 0:
perf.loc[r, "P"] = TP / (TP + FP)
else:
perf.loc[r, "P"] = 0
if (TP + FN) != 0:
perf.loc[r, "R"] = TP / (TP + FN)
else:
perf.loc[r, "R"] = 0
"get precision lists for recall levels"
prec_lists = [perf.loc[perf['R'] >= i, 'P'].tolist() for i in rec_levels]
"get maxAP for each recall level"
maxAP = [max(pl) if pl else 0 for pl in prec_lists]
"compute meanAP and fscore"
if maxAP != []:
meanAP = np.sum(maxAP) / len(rec_levels)
fscore = np.mean(2 * np.array(maxAP) * rec_levels / (np.array(maxAP) + rec_levels))
else:
meanAP = 0
fscore = 0
ap_list.append(meanAP)
fscore_list.append(fscore)
"meanAP and mean fscore"
mean_ap = np.mean(ap_list)
mean_fscore = np.mean(fscore_list)
return mean_ap, mean_fscore
def calculate_map(self, feature_matrix):
"""
calculate_map(feature_matrix) -> float, float, float, float
Uses xgboost to run classification with features and targets.
Works with binary or multiclass classification.
Computes mAP (AuPR), AuROC, Accuaracy, and F-score as classification metrics.
Args:
feature_matrix (Dataframe): dataframe with features and targets
"""
"if experiment is subc baseline, set number of features to 5."
cfg = self.cfg
if cfg.class_experiment == "subc_baseline":
feature_size = self.num_subc
else:
feature_size = self.cfg.pos_embed_size
"initialize"
n_folds = cfg.n_folds
mean_map, mean_accuracy, mean_f_score, mean_auroc = 0, 0, 0, 0
average_precisions = np.zeros(n_folds)
f_score = np.zeros(n_folds)
accuarcy = np.zeros(n_folds)
auroc = np.zeros(n_folds)
"prepare feature matrix"
feature_matrix = feature_matrix.dropna()
feature_matrix = feature_matrix.sample(frac=1)
predictions = | pd.DataFrame() | pandas.DataFrame |
from hydroDL.data import dbBasin
import json
import os
from hydroDL import kPath
import numpy as np
import pandas as pd
sd = '1982-01-01'
ed = '2018-12-31'
dataName = 'test'
dictSiteName = 'dict{}.json'.format(dataName)
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, dictSiteName)) as f:
dictSite = json.load(f)
siteNoLst = dictSite['rmTK'][:10]
# DF = dbBasin.DataFrameBasin.new(dataName, siteNoLst, sdStr=sd, edStr=ed)
DF = dbBasin.DataFrameBasin(dataName)
# normalization
DFN = dbBasin.func.localNorm(DF, subset='all')
DFN.saveAs(dataName+'N')
seed = 0
rate = 0.2
rng = np.random.default_rng(seed)
# random subset
mask = np.ones([len(DF.t), len(DF.siteNoLst)]).astype(bool)
for indS, siteNo in enumerate(DF.siteNoLst):
obsB = np.any(~np.isnan(DF.c[:, indS, :]), axis=1)
obsD = np.where(obsB)[0]
nPick = int(sum(obsB*rate))
ind = rng.choice(obsD, nPick, replace=False)
mask[ind, indS] = False
DF.saveSubset('pkR20', mask=mask)
DF.saveSubset('rmR20', mask=~mask)
# last 20% subset
mask = np.ones([len(DF.t), len(DF.siteNoLst)]).astype(bool)
for indS, siteNo in enumerate(DF.siteNoLst):
obsB = np.any(~np.isnan(DF.c[:, indS, :]), axis=1)
obsD = np.where(obsB)[0]
nPick = int(sum(obsB*rate))
ind = obsD[-nPick:]
mask[ind, indS] = False
DF.saveSubset('pkL20', mask=mask)
DF.saveSubset('rmL20', mask=~mask)
# pick by year
yrIn = np.arange(1985, 2020, 5).tolist()
t1 = dbBasin.func.pickByYear(DF.t, yrIn)
t2 = dbBasin.func.pickByYear(DF.t, yrIn, pick=False)
DF.createSubset('pkYr5', dateLst=t1)
DF.createSubset('rmYr5', dateLst=t2)
# before after 2010
DF.saveSubset('B10', ed='2009-12-31')
DF.saveSubset('A10', sd='2010-01-01')
# examine test/train rate
trainLst = ['rmYr5', 'rmR20', 'rmL20', 'rmRT20', 'B10']
testLst = ['rmYr5', 'pkR20', 'pkL20', 'pkRT20', 'A10']
df = | pd.DataFrame(index=DF.varC, columns=trainLst) | pandas.DataFrame |
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
import glob
import pandas as pd
import numpy as np
from sklearn import decomposition
import deprecated
import logging
sys.path.append(root_path)
from config.globalLog import logger
def generate_monoscale_samples(source_file, save_path, lags_dict, column, test_len, lead_time=1,regen=False):
"""Generate learning samples for autoregression problem using original time series.
Args:
'source_file' -- ['String'] The source data file path.
'save_path' --['String'] The path to restore the training, development and testing samples.
'lags_dict' -- ['int dict'] The lagged time for original time series.
'column' -- ['String']The column's name for read the source data by pandas.
'test_len' --['int'] The length of development and testing set.
'lead_time' --['int'] The lead time.
"""
logger.info('Generating muliti-step decomposition-ensemble hindcasting samples')
save_path = save_path+'/'+str(lead_time)+'_ahead_pacf/'
logger.info('Source file:{}'.format(source_file))
logger.info('Save path:{}'.format(save_path))
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# Load data from local dick
if '.xlsx' in source_file:
dataframe = pd.read_excel(source_file)[column]
elif '.csv' in source_file:
dataframe = pd.read_csv(source_file)[column]
# convert pandas dataframe to numpy array
nparr = np.array(dataframe)
# Create an empty pandas Dataframe
full_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
lag = lags_dict['ORIG']
for i in range(lag):
x = pd.DataFrame(nparr[i:dataframe.shape[0] -
(lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
full_samples = pd.concat([full_samples, x], axis=1, sort=False)
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])
label = label.reset_index(drop=True)
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Add labled data to full_data_set
full_samples = pd.concat([full_samples, label], axis=1, sort=False)
# Get the length of this series
series_len = full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(series_len - test_len)]
# Get the testing set.
test_samples = full_samples[(series_len - test_len):series_len]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(series_len - test_len - test_len)]
dev_samples = full_samples[(
series_len - test_len - test_len):(series_len - test_len)]
assert (train_samples.shape[0] + dev_samples.shape[0] +
test_samples.shape[0]) == series_len
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Series length:{}'.format(series_len))
logger.info('Series length:{}'.format(series_len))
logger.info(
'Training-development sample size:{}'.format(train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = | pd.DataFrame(series_max, columns=['series_max']) | pandas.DataFrame |
import pandas as pd
def process(x
:
pd.DataFrame
) -> pd.DataFrame:
y = | pd.concat([x, x]) | pandas.concat |
"""
Import, format and tidy data.
This script imports the data stored in the "data" folder, and save a .csv (or .nc, TBD.) with the formatted dataset.
The data sources can vary, therefore, the script needs to identify the source (not explicitly provided) and adapt to
them. Some meta-data (e.g. the podcast name) is in the file name itself, so the script needs to identify it as well.
"""
import numpy as np
import pandas as pd
import datetime as dt
from datetime import datetime
import glob
import re
import pickle
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import os
DATA_FOLDER = 'G:/My Drive/Podcast/PNB/data/'
# DATA_FOLDER = 'C:/Users/Pedro/Desktop/Data analisys Podcast/data/'
OUTPUT_FOLDER = DATA_FOLDER + 'formatted/'
os.environ['SPOTIPY_CLIENT_ID'] = 'f001dec668494347ad43adb1accd9097'
os.environ['SPOTIPY_CLIENT_SECRET'] = 'INSERT_SECRET'
os.environ['SPOTIPY_REDIRECT_URI'] = 'http://localhost'
def debug_time(date_str):
if date_str[-8:-6] != '24':
return pd.to_datetime(date_str, format='%m/%d/%Y %H:%M:%S')
date_str = date_str[:-8] + '00' + date_str[-6:]
return pd.to_datetime(date_str, format='%m/%d/%Y %H:%M:%S') + dt.timedelta(days=1)
def import_total_plays(path, source):
"""
Import a "total plays" file.
:param path: str, path to file
:param source: str, source of the file, e.g. 'Anchor'.
:return: pandas dataframe with columns 'time' and 'plays', with '1D' interval.
"""
sources_list = ['test', 'Anchor']
assert type(path) == str, 'path must be a string: %r' % path
assert type(source) == str, 'source must be a string: %r' % source
assert source in sources_list, 'Source %r not implemented. Make sure the spelling is correct.' % source
if source == 'test':
data = None
if source == 'Anchor':
# by default, %H is 00-23, however we do not need the time info as it is daily.
anchor_date_parser = lambda x: datetime.strptime(x, '%m/%d/%Y 24:00:00')
data = pd.read_csv(path, names=['time', 'plays'], header=0, parse_dates=['time'],
date_parser=anchor_date_parser)
return data
def import_top_eps(path, source):
"""
Import a "top episodes" file.
:param path: str, path to file
:param source: str, source of the file, e.g. 'Anchor'.
:return: pandas dataframe with columns 'time' and 'plays', with '1D' interval.
"""
sources_list = ['test', 'Anchor']
assert type(path) == str, 'path must be a string: %r' % path
assert type(source) == str, 'source must be a string: %r' % source
assert source in sources_list, 'Source %r not implemented. Make sure the spelling is correct.' % source
if source == 'test':
data = None
if source == 'Anchor':
data = pd.read_csv(path, names=['title', 'plays', 'time'], header=0)
data.time = data.time.apply(debug_time)
return data
def import_plays_by_device(path, source):
"""
Import a "plays by devices" file.
:param path: str, path to file
:param source: str, source of the file, e.g. 'Anchor'.
:return: pandas dataframe with columns 'time' and 'plays', with '1D' interval.
"""
sources_list = ['test', 'Anchor']
assert type(path) == str, 'path must be a string: %r' % path
assert type(source) == str, 'source must be a string: %r' % source
assert source in sources_list, 'Source %r not implemented. Make sure the spelling is correct.' % source
if source == 'test':
data = None
if source == 'Anchor':
data = pd.read_csv(path, names=['device', 'plays_perc'], header=0)
return data
def import_plays_by_app(path, source):
"""
Import a "plays by app" file.
:param path: str, path to file
:param source: str, source of the file, e.g. 'Anchor'.
:return: pandas dataframe with columns 'time' and 'plays', with '1D' interval.
"""
sources_list = ['test', 'Anchor']
assert type(path) == str, 'path must be a string: %r' % path
assert type(source) == str, 'source must be a string: %r' % source
assert source in sources_list, 'Source %r not implemented. Make sure the spelling is correct.' % source
if source == 'test':
data = None
if source == 'Anchor':
data = pd.read_csv(path, names=['app', 'plays_perc'], header=0)
return data
def import_geolocation(path, source):
"""
Import a "geolocation" file.
:param path: str, path to file
:param source: str, source of the file, e.g. 'Anchor'.
:return: pandas dataframe with columns 'time' and 'plays', with '1D' interval.
"""
sources_list = ['test', 'Anchor']
assert type(path) == str, 'path must be a string: %r' % path
assert type(source) == str, 'source must be a string: %r' % source
assert source in sources_list, 'Source %r not implemented. Make sure the spelling is correct.' % source
if source == 'test':
data = None
if source == 'Anchor':
data = pd.read_csv(path, names=['location', 'plays_perc'], header=0)
return data
def identify_source_and_content():
"""
Identify the source and content of the files in the data folder, import data and return a ndarray with the filepath,
podcast name, content type, source, and data itself.
:return: ndarray with data and metadata
"""
filepaths = glob.glob(DATA_FOLDER + '*.csv')
files = []
for n, filepath in enumerate(filepaths):
source = None
content = None
if re.search('TotalPlays_all-time', filepath):
content = 'TotalPlays'
source = 'Anchor'
data = import_total_plays(filepath, source)
podcast = re.search('(?<=\\\\)(.*)(?=_TotalPlays_all-time)', filepath).group(1)
if re.search('TopEpisodes_all-time', filepath):
content = 'TopEpisodes'
source = 'Anchor'
data = import_top_eps(filepath, source)
podcast = re.search('(?<=\\\\)(.*)(?=_TopEpisodes_all-time)', filepath).group(1)
if re.search('PlaysByDevice_all-time', filepath):
content = 'PlaysByDevice'
source = 'Anchor'
data = import_plays_by_device(filepath, source)
podcast = re.search('(?<=\\\\)(.*)(?=_PlaysByDevice_all-time)', filepath).group(1)
if re.search('PlaysByApp_all-time', filepath):
content = 'PlaysByApp'
source = 'Anchor'
data = import_plays_by_app(filepath, source)
podcast = re.search('(?<=\\\\)(.*)(?=_PlaysByApp_all-time)', filepath).group(1)
if re.search('GeoLocation_all-time', filepath):
content = 'GeoLocation'
source = 'Anchor'
data = import_geolocation(filepath, source)
podcast = re.search('(?<=\\\\)(.*)(?=_GeoLocation_all-time)', filepath).group(1)
aggr = [filepath, podcast, content, source, data]
files.append(aggr)
return np.array(files, dtype=object)
def aggregate_data(files):
"""
Aggregate data from identify_source_and_content() by content, creating the variable "podcast". Returns a dict with
content in keys and pd.dataframes in values.
:param files: ndarray from identify_source_and_content()
:return: dict of pd.dataframes
"""
contents = np.unique(files[:, 2])
data = {}
for content in contents:
filter_arr = files[:, 2] == content
content_file = files[filter_arr, :]
for n_podcast in range(len(content_file)):
content_file[n_podcast, 4]['podcast'] = content_file[n_podcast, 1]
data[content] = pd.concat(content_file[:, 4])
return data
def import_spotify_meta():
scope = 'user-read-playback-position'
spotify = spotipy.Spotify(client_credentials_manager=SpotifyOAuth(scope=scope))
spotify_meta = pd.DataFrame(
columns=['ep_name', 'ep_release_date', 'ep_description', 'ep_duration_ms', 'ep_images', 'podcast'])
metadata = | pd.read_csv(DATA_FOLDER + 'spotify_meta.txt') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 00:37:32 2019
@author: tungutokyo
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
pd.set_option("display.max_columns", 60)
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
def get_RandSearchCV(X_train, y_train, X_test, y_test, scoring, type_search, output_file):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
st_t = dt.now()
# Numer of trees are used
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
#n_estimators = list(np.arange(100,1000,50))
#n_estimators = [1000]
# Maximum depth of each tree
max_depth = [5, 10, 25, 50, 75, 100]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 8, 10]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 8, 10]
# Maximum numeber of features to consider for making splits
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion="gini", random_state=42)
base_model_gb = GradientBoostingClassifier(criterion="friedman_mse", random_state=42)
# Run randomzed search
n_iter_search = 30
if type_search == "RandomSearchCV-RandomForest":
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
else:
rsearch_cv = RandomizedSearchCV(estimator=base_model_gb,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train, y_train)
#f = open("output.txt", "a")
print("Best estimator obtained from CV data: \n", rsearch_cv.best_estimator_, file=output_file)
print("Best Score: ", rsearch_cv.best_score_, file=output_file)
return rsearch_cv
def performance_rand(best_clf, X_train, y_train, X_test, y_test, type_search, num_class, output_file, class_name):
#f = open("output.txt", "a")
print("-"*100)
print("~~~~~~~~~~~~~~~~~~ PERFORMANCE EVALUATION ~~~~~~~~~~~~~~~~~~~~~~~~", file=output_file)
print("Detailed report for the {} algorithm".format(type_search), file=output_file)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
test_accuracy = accuracy_score(y_test, y_pred, normalize=True) * 100
points = accuracy_score(y_test, y_pred, normalize=False)
print("The number of accurate predictions out of {} data points on unseen data is {}".format(
X_test.shape[0], points), file=output_file)
print("Accuracy of the {} model on unseen data is {}".format(
type_search, np.round(test_accuracy, 2)), file=output_file)
print("Precision of the {} model on unseen data is {}".format(
type_search, np.round(metrics.precision_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("Recall of the {} model on unseen data is {}".format(
type_search, np.round(metrics.recall_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("F1 score of the {} model on unseen data is {}".format(
type_search, np.round(metrics.f1_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("\nClassification report for {} model: \n".format(type_search), file=output_file)
print(metrics.classification_report(y_test, y_pred), file=output_file)
plt.figure(figsize=(12,12))
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("\nThe Confusion Matrix: \n", file=output_file)
print(cnf_matrix, file=output_file)
#class_name = ["CDI", "ignore-nonCDI", "Health"]
#class_name = ["CRC", "Adenomas", "Health"]
# class_name = ["OB", "OW", "Health"]
class_name = class_name
cmap = plt.cm.Blues
plt.imshow(cnf_matrix_norm, interpolation="nearest", cmap=cmap)
plt.colorbar()
fmt = ".2g"
thresh = cnf_matrix_norm.max()/2
for i, j in itertools.product(range(cnf_matrix_norm.shape[0]), range(cnf_matrix_norm.shape[1])):
plt.text(j,i,format(cnf_matrix_norm[i,j], fmt), ha="center", va="center",
color="white" if cnf_matrix_norm[i,j] > thresh else "black", fontsize=35)
plt.xticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.yticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.ylabel("True label", fontsize=30)
plt.xlabel("Predicted label", fontsize=30)
plt.ylim((num_class - 0.5, -0.5))
plt.show()
#plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)
"""
cmap = plt.cm.Blues
sns.heatmap(cnf_matrix_norm, annot=True, cmap=cmap, fmt=".2f", annot_kws={"size":15}, linewidths=.05)
if type_search == "RandomSearchCV-RandomForest":
plt.title("The Normalized Confusion Matrix - {}".format("RandomForest"), fontsize=20)
else:
plt.title("The Normalized Confusion Matrix - {}".format("GradientBoosting"), fontsize=20)
plt.ylabel("True label", fontsize=15)
plt.xlabel("Predicted label", fontsize=15)
plt.show()
"""
print("\nROC curve and AUC")
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
y_test_cat = np.array(pd.get_dummies(y_test))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_class):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_cat[:,i], y_pred_prob[:,i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_class)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_class):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_class
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
plt.figure(figsize=(12,12))
plt.plot(fpr["macro"], tpr["macro"],
label = "macro-average ROC curve with AUC = {} - Accuracy = {}%".format(
round(roc_auc["macro"], 2), round(test_accuracy, 2)),
color = "navy", linestyle=":", linewidth=4)
colors = cycle(["red", "orange", "blue", "pink", "green"])
for i, color in zip(range(num_class), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label = "ROC curve of class {0} (AUC = {1:0.2f})".format(i, roc_auc[i]))
plt.plot([0,1], [0,1], "k--", lw=2)
plt.title("ROC-AUC for Random Forest".format(type_search), fontsize=20)
plt.xlabel("False Positive Rate", fontsize=15)
plt.ylabel("True Positive Rate", fontsize=15)
plt.legend(loc="lower right")
plt.show()
importances = best_clf.feature_importances_
indices = np.argsort(importances)[::-1]
return {"importance": importances,
"index": indices,
"y_pred": y_pred,
"y_pred_prob": y_pred_prob}
def RF_classifier(X_train, y_train, X_test, y_test, scoring, type_search, num_class, output_file, top_feature, class_name):
#f = open("output.txt", "a")
print("*"*100)
print("Starting {} steps with {} for evaluation rules...".format(type_search, scoring))
print("*"*100)
rsearch_cv = get_RandSearchCV(X_train, y_train, X_test, y_test, scoring, type_search, output_file)
best_estimator = rsearch_cv.best_estimator_
max_depth = rsearch_cv.best_estimator_.max_depth
n_estimators = rsearch_cv.best_estimator_.n_estimators
var_imp_rf = performance_rand(best_estimator, X_train, y_train, X_test, y_test, type_search,
num_class, output_file, class_name)
print("\n~~~~~~~~~~~~~ Features ranking and ploting ~~~~~~~~~~~~~~~~~~~~~\n", file=output_file)
importances_rf = var_imp_rf["importance"]
indices_rf = var_imp_rf["index"]
y_pred = var_imp_rf["y_pred"]
feature_tab = pd.DataFrame({"Features" : list(X_train.columns),
"Importance": importances_rf})
feature_tab = feature_tab.sort_values("Importance", ascending = False).reset_index(drop=True)
print(feature_tab, file=output_file)
#index = np.arange(len(X_train.columns))
#importance_desc = sorted(importances_rf)
index = feature_tab["Features"].iloc[:top_feature]
importance_desc = feature_tab["Importance"].iloc[:top_feature]
feature_space = []
for i in range(indices_rf.shape[0]-1, -1, -1):
feature_space.append(X_train.columns[indices_rf[i]])
fig, ax = plt.subplots(figsize=(20,25))
ax = plt.gca()
plt.title("Feature importances of Random Forest".format(type_search), fontsize=30)
plt.barh(index, importance_desc, align="center", color="blue", alpha=0.6)
plt.grid(axis="x", color="white", linestyle="-")
plt.yticks(fontsize=30)
plt.xticks(fontsize=20)
plt.xlabel("The Average of Decrease in Impurity", fontsize=20)
plt.ylabel("Features", fontsize=30)
ax.tick_params(axis="both", which="both", length=0)
plt.show()
return {"Main Results": var_imp_rf,
"Importance Features": feature_tab}
###############################################################################
################### Stratified K-Folds cross-validator ########################
###############################################################################
def RF_SKF(rf, X, y, num_cv = 5, random_state = 42):
skf = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 42)
test_accuracies = 0
test_precisions = 0
test_recalls = 0
test_f1s = 0
cv_count = 0
# rf = RandomForestClassifier(n_estimators = 100)
for train, test in skf.split(X,y):
probas_ = rf.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
y_pred = rf.predict(X.iloc[test])
test_accuracy = metrics.accuracy_score(y.iloc[test], y_pred, normalize = True) * 100
test_accuracies += test_accuracy
test_precision = metrics.precision_score(y.iloc[test], y_pred, average="macro")
test_precisions += test_precision
test_recall_score = metrics.recall_score(y.iloc[test], y_pred, average="macro")
test_recalls += test_recall_score
test_f1_score = metrics.f1_score(y.iloc[test], y_pred, average="macro")
test_f1s += test_f1_score
cv_count += 1
test_accuracies /= cv_count
test_precisions /= cv_count
test_recalls /= cv_count
test_f1s /= cv_count
return {i: j for i, j in
zip(("Accuracy", "Precision_Score", "Recall_Score", "F1_Score"),
(test_accuracies, test_precisions, test_recalls, test_f1s))}
def RF_SKF_search(X, y, n_est, crit, max_depth, min_split, min_leaf, max_feature,
num_cv = 5, random_state = 42, report_loop = True):
print(n_est, crit, min_split, min_leaf)
rf = RandomForestClassifier(n_estimators = n_est,
max_depth = max_depth,
criterion = crit,
min_samples_split = min_split,
min_samples_leaf = min_leaf,
max_features = max_feature,
random_state = random_state)
# Cross_validated results
try:
results = RF_SKF(rf, X, y, num_cv = num_cv, random_state = random_state)
except:
results = {"Accuracy": np.nan}
# Get oob_score for non-cross validated results
rf = RandomForestClassifier(n_estimators = n_est,
max_depth = max_depth,
criterion = crit,
min_samples_split = min_split,
min_samples_leaf = min_leaf,
max_features = max_feature,
random_state = random_state,
oob_score = True)
try:
score = rf.fit(X, y).oob_score_
except:
score = np.nan
if report_loop == True:
print("Finished. (Accuracy = {:.2f}%)".format(results["Accuracy"]))
return [n_est, crit, max_depth, min_split, min_leaf, max_feature,
results["Accuracy"], results["Precision_Score"], results["Recall_Score"], results["F1_Score"], score]
def RF_SKF_run(X, y, report_loop = True):
# Numer of trees are used
n_estimators = [50, 100, 150, 200, 250, 300]
criterion = ["gini", "entropy"]
# Maximum depth of each tree
max_depths = [5, 10, 25, 50, 75, 100]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 8, 10]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 8, 10]
# Maximum numeber of features to consider for making splits
max_featuress = ["auto", "sqrt", "log2", None]
random_state = 42
rf_result_all = []
for crit in criterion:
for min_split in min_samples_split:
for min_leaf in min_samples_leaf:
for n_est in n_estimators:
for max_depth in max_depths:
for max_features in max_featuress:
rf_result = RF_SKF_search(X, y, n_est, crit, max_depth,
min_split, min_leaf, max_features, random_state,
report_loop = report_loop)
rf_result_all.append(rf_result)
rf_result_df = pd.DataFrame(rf_result_all,
columns = ["n_estimators", "criterion", "max_depth",
"min_samples_split", "min_samples_leaf", "max_features",
"Accurancy", "Precision_Score", "Recall_Score", "F1_score",
"oob_score"]).sort_values("Accurancy", ascending = False).reset_index(drop=True)
return rf_result_df
def performance_SKF(best_clf, X_train, y_train, X_test, y_test, type_search, num_class, output_file):
#f = open("output.txt", "a")
print("-"*100)
print("~~~~~~~~~~~~~~~~~~ PERFORMANCE EVALUATION ~~~~~~~~~~~~~~~~~~~~~~~~", file=output_file)
print("Detailed report for the {} algorithm".format(type_search), file=output_file)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
test_accuracy = accuracy_score(y_test, y_pred, normalize=True) * 100
points = accuracy_score(y_test, y_pred, normalize=False)
print("The number of accurate predictions out of {} data points on unseen data is {}".format(
X_test.shape[0], points), file=output_file)
print("Accuracy of the {} model on unseen data is {}".format(
type_search, np.round(test_accuracy, 2)), file=output_file)
print("Precision of the {} model on unseen data is {}".format(
type_search, np.round(metrics.precision_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("Recall of the {} model on unseen data is {}".format(
type_search, np.round(metrics.recall_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("F1 score of the {} model on unseen data is {}".format(
type_search, np.round(metrics.f1_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("\nClassification report for {} model: \n".format(type_search), file=output_file)
print(metrics.classification_report(y_test, y_pred), file=output_file)
plt.figure(figsize=(12,12))
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("\nThe Confusion Matrix: \n", file=output_file)
print(cnf_matrix, file=output_file)
"""
cmap = plt.cm.Blues
plt.imshow(cnf_matrix, interpolation="nearest", cmap=cmap)
plt.colorbar()
fmt = "d"
thresh = cnf_matrix.max()/2
for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
plt.text(j,i,format(cnf_matrix[i,j], fmt), ha="center", va="center",
color="white" if cnf_matrix[i,j] > thresh else "black")
"""
cmap = plt.cm.Blues
sns.heatmap(cnf_matrix_norm, annot=True, cmap=cmap, fmt=".2f", annot_kws={"size":15}, linewidths=.05)
plt.title("The Normalized Confusion Matrix - {}".format(type_search), fontsize=20)
plt.ylabel("True label", fontsize=15)
plt.xlabel("Predicted label", fontsize=15)
plt.show()
print("\nROC curve and AUC")
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
y_test_cat = np.array(pd.get_dummies(y_test))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_class):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_cat[:,i], y_pred_prob[:,i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_class)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_class):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_class
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
plt.figure(figsize=(12,12))
plt.plot(fpr["macro"], tpr["macro"],
label = "macro-average ROC curve with AUC = {} - Accuracy = {}%".format(
round(roc_auc["macro"], 2), round(test_accuracy, 2)),
color = "navy", linestyle=":", linewidth=4)
colors = cycle(["red", "orange", "blue", "pink", "green"])
for i, color in zip(range(num_class), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label = "ROC curve of class {0} (AUC = {1:0.2f})".format(i, roc_auc[i]))
plt.plot([0,1], [0,1], "k--", lw=2)
plt.title("ROC-AUC for {}".format(type_search), fontsize=20)
plt.xlabel("False Positive Rate", fontsize=15)
plt.ylabel("True Positive Rate", fontsize=15)
plt.legend(loc="lower right")
plt.show()
importances = best_clf.feature_importances_
indices = np.argsort(importances)[::-1]
return {"importance": importances,
"index": indices,
"y_pred": y_pred,
"y_pred_prob": y_pred_prob}
###############################################################################
################ Forward algorithm to variable selection ######################
###############################################################################
def random_forest_forward(X_train, y_train, X_test, y_test, n_selected_features = 1000, scoring='accuracy'):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion='gini', random_state=42)
n_iter_search = 30
scoring = scoring
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
while count < n_selected_features:
max_acc = 0
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
print("The total time for searching subset: {}".format(dt.now()-st_t))
return all_info, all_model, f
def random_forest_randomforward(X_train, y_train, X_test, y_test,
n_selected_features = 1000, scoring='accuracy', n_iter=1000):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
import random
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion='gini', random_state=42)
n_iter_search = 30
scoring = scoring
# selected feature set, initialized to be empty
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
while count < n_selected_features:
#F = []
max_acc = 0
for i in range(n_iter):
col_train = random.sample(list(X_train.columns), count+1)
col_train = np.array(col_train)
X_train_tmp = X_train[col_train]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[col_train])
acc = metrics.accuracy_score(y_test, y_pred)
if acc > max_acc:
max_acc = acc
idx = col_train
best_model = best_estimator
#F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
all_F.append(idx)
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = | pd.concat([c, a, f["All"]], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import shutil
import multiprocessing.pool
import matplotlib.dates
import matplotlib.pyplot as plt
import matplotlib.cbook
import matplotlib.cm
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from matplotlib import colors as mcolors
from postgreSQLSuite import *
log = logging.getLogger(__name__)
log.setLevel(DEBUG)
try:
from osgeo import ogr, osr, gdal
except Exception as e:
log.error(f"Error {e} occured while importing osgeo")
pd.options.display.width = 1000
pd.options.display.max_columns=999
pd.options.display.max_rows=100
def _rasterizeTimesliceWorker(df, rasterPath, imagePath, vmin, vmax, dt, xres, yres, perform_rasterization=True):
"""
Timeslices rasterize worker
Rasters a timeslice based on a pd.DataFrame using GDAL by first converting the timeslice to a
OGR vector layer and then rasterizing the content to a raster layer using GDAL
:param df: dictionary containing timeslices in format hour:minute:timeslice
:param rasterPath: path to folder where rasters should be stored
:param imagePath: path to folder where images should be stored
:param vmin: minimum data value (number of available vehicles) on all to-be-rastered dataframes
:param vmax: maximum data value (number of available vehicles) on all to-be-rastered dataframes
:param xres: width of rastered image
:param yres: height of rastered image
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
"""
driver = ogr.GetDriverByName("ESRI Shapefile")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lats = list(df.lat.values)
lons = list(df.lon.values)
values = list(df.countReachable.values)
raster_fn = os.path.join(imagePath, f"{dt.strftime('%d.%m.%y')}-{dt.strftime('%H-%M')}-{xres}-{yres}.tiff")
vector_fn = os.path.join(rasterPath, f"{dt.strftime('%d.%m.%y')}-{dt.strftime('%H-%M')}.shp")
# check if vector layer already exists, otherwise create new one and convert values from df to layer
if not os.path.isfile(vector_fn):
outsrc = driver.CreateDataSource(vector_fn)
outlayer = outsrc.CreateLayer(vector_fn, srs, ogr.wkbPoint)
outlayer.CreateField(ogr.FieldDefn("color_r"), ogr.OFTInteger)
outlayer.CreateField(ogr.FieldDefn("color_g"), ogr.OFTInteger)
outlayer.CreateField(ogr.FieldDefn("color_b"), ogr.OFTInteger)
normalizer = mcolors.Normalize(vmin=vmin, vmax=vmax)
cmap = matplotlib.cm.get_cmap("hot")
for idx in range(len(lats)):
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(float(lats[idx]), float(lons[idx]))
color = cmap(normalizer(values[idx]))
c_r = int(color[0] * 255)
c_g = int(color[1] * 255)
c_b = int(color[2] * 255)
feature = ogr.Feature(outlayer.GetLayerDefn())
feature.SetGeometry(point)
feature.SetField("color_r", c_r)
feature.SetField("color_g", c_g)
feature.SetField("color_b", c_b)
outlayer.CreateFeature(feature)
feature = None # explicitly set feature to None, indicating to OGR that the content should now be stored
outsrc = None # explicitly set vector layer to None, indicating to OGR that the content should now be stored
if perform_rasterization:
NoData_value = 0
# Open the data source and read in the extent
source_ds = ogr.Open(vector_fn)
source_layer = source_ds.GetLayer()
xmin, xmax, ymin, ymax = source_layer.GetExtent()
# Create the destination data source
x_pixel_size = ((xmax - xmin) / xres)
y_pixel_size = ((ymax - ymin) / yres)
target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, xres, yres, 3, gdal.GDT_Byte)
target_ds.SetGeoTransform((xmin, x_pixel_size, 0, ymax, 0, -y_pixel_size))
# use three bands to encode colors
band1 = target_ds.GetRasterBand(1)
band1.SetNoDataValue(NoData_value)
band2 = target_ds.GetRasterBand(2)
band2.SetNoDataValue(NoData_value)
band3 = target_ds.GetRasterBand(3)
band3.SetNoDataValue(NoData_value)
gdal.RasterizeLayer(target_ds, [1], source_layer, options = ["ATTRIBUTE=color_r", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
gdal.RasterizeLayer(target_ds, [2], source_layer, options = ["ATTRIBUTE=color_g", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
gdal.RasterizeLayer(target_ds, [3], source_layer, options = ["ATTRIBUTE=color_b", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
return dt
def rasterizeTimeslices(timeslices: dict, slice_datetime: datetime.datetime, rasterPath: str, imagePath: str, perform_rasterization=True, xres=1000, yres=1000):
"""
Rasterize timeslices of one day using GDAL
:param timeslices: dictionary containing timeslices in format hour:minute:timeslice
:param slice_datetime: datetime indicating begin of timeslices
:param rasterPath: path to folder where rasters should be stored
:param imagePath: path to folder where images should be stored
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
:param xres: width of rastered image
:param yres: height of rastered image
"""
log.info(f"Rasterizing timeslices")
if not os.path.isdir(rasterPath):
log.warning(f"{rasterPath} does not exist, attempting to create folder..")
os.mkdir(rasterPath)
if not os.path.isdir(imagePath):
log.warning(f"{imagePath} does not exist, attempting to create folder..")
os.mkdir(imagePath)
maxAgents = 0
minAgents = 4000
for hour in sorted(list(timeslices.keys())):
for minute in timeslices[hour]:
minAgents = min(minAgents, timeslices[hour][minute][timeslices[hour][minute].countReachable > 3].countReachable.min())
maxAgents = max(maxAgents, timeslices[hour][minute].countReachable.max())
multproc = False
hours = sorted(timeslices.keys())
minutes = range(0, 60, 10)
global parsed
parsed = 0
maxParsed = len(hours)*len(minutes)
steps = 10
iter = int(maxParsed / steps)
def callback(result):
dt = result
c_hour = dt.hour
c_minute = dt.minute
global parsed
parsed += 1
numBlocks = int(parsed / (iter + 1)) if parsed != maxParsed else steps
print(f"\rRendering timeslices [" + ''.join(['#' for _ in range(numBlocks)]).ljust(steps) + f"] ({str(c_hour).rjust(2)} {str(c_minute).rjust(2)})", end="", flush=True)
log.info(f"Starting processing (multiprocessing: {multproc})")
if multproc:
pool = multiprocessing.Pool()
for hour in hours:
for minute in sorted(list(timeslices[hour].keys())):
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
pool.apply_async(_rasterizeTimesliceWorker,
(timeslices[hour][minute], rasterPath, imagePath, minAgents, maxAgents, dt, xres, yres, perform_rasterization),
callback=callback)
pool.close()
pool.join()
else:
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
callback(_rasterizeTimesliceWorker(timeslices[hour][minute], rasterPath, imagePath, minAgents, maxAgents, dt, xres, yres, perform_rasterization))
print()
def rasterizeTimesliceMultipleDays(timeslices_range: dict, perform_rasterization):
"""
Rasterize timeslices over multiple days while keeping consistent color scheme across rasters
timeslices_range shall for each day contain a dictioanry with keys:
- timeslices
- startTime
- endTime
- imagePath
- rasterPath
:param timeslices_range: dictionary containing timeslices and metadata for each day
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
"""
xres = 1000
yres = 1000
multproc = True
min_agents_range = 4000
max_agents_range = 0
log.info(f"Calculating min and max agents over all timeslices")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
for hour in sorted(list(timeslices.keys())):
for minute in timeslices[hour]:
min_agents_range = min(min_agents_range, timeslices[hour][minute][timeslices[hour][minute].countReachable > 3].countReachable.min())
max_agents_range = max(max_agents_range, timeslices[hour][minute].countReachable.max())
log.info(f"min agents: {min_agents_range}, max agents: {max_agents_range}")
hours = range(0,24)
minutes = range(0, 60, 10)
log.info(f"Rasterizing timeslices from {timeslices_range[list(timeslices_range.keys())[0]]['startTime']} to {timeslices_range[list(timeslices_range.keys())[-1]]['startTime']}")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
rasterPath = timeslices_range[day]["rasterPath"]
imagePath = timeslices_range[day]["imagePath"]
slice_datetime = timeslices_range[day]["startTime"]
log.info(f"Rasterizing timeslices on day {day}")
if not os.path.isdir(rasterPath):
log.warning(f"{rasterPath} does not exist, attempting to create folder..")
os.mkdir(rasterPath)
if not os.path.isdir(imagePath):
log.warning(f"{imagePath} does not exist, attempting to create folder..")
os.mkdir(imagePath)
global parsed
parsed = 0
maxParsed = len(hours)*len(minutes)
steps = 10
iter = int(maxParsed / steps)
def callback(result):
dt = result
c_hour = dt.hour
c_minute = dt.minute
global parsed
parsed += 1
numBlocks = int(parsed / (iter + 1)) if parsed != maxParsed else steps
print(f"\rRendering timeslices [" + ''.join(['#' for _ in range(numBlocks)]).ljust(steps) + f"] ({str(c_hour).rjust(2)} {str(c_minute).rjust(2)})", end="", flush=True)
if multproc:
pool = multiprocessing.Pool()
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
pool.apply_async(_rasterizeTimesliceWorker,
(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization),
callback=callback)
pool.close()
pool.join()
else:
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
callback(_rasterizeTimesliceWorker(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization))
print()
shutil.rmtree(rasterPath)
def visualizeOverview(timeslices: dict, imagePath: str, startTime: datetime.datetime, endTime: datetime.datetime, write_out: bool = False):
"""
Visualize multiple timeslices by a line graph representing the minimum, mean and maximum number of usable vehicles per timeslice
:param timeslices: dictionary containing timeslices in format hour:minute:timeslice
:param imagePath: path to dictionary where output image should be stored
:param startTime: datetime representing begin of timeslices
:param endTime: datetime representing end of timeslices
:param write_out: whether or not to write image and overviewDf to disk
"""
maxAgents = 0
minAgents = 4000
meanAgents = []
maxs = []
mins = []
means = []
idxs = []
df_data = []
for hour in sorted(list(timeslices.keys())):
for minute in sorted(list(timeslices[hour].keys())):
minVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.min()
maxVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.max()
meanVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.mean()
idx = datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=hour, minute=minute)
idxs.append(idx)
mins.append(minVal)
maxs.append(maxVal)
means.append(meanVal)
minAgents = min(minAgents, minVal)
maxAgents = max(maxAgents, maxVal)
meanAgents.append(meanVal)
df_data.append([idx, minVal, meanVal, maxVal])
meanAgents = int(np.mean(meanAgents))
log.debug(f"Minimum agents at one spot: {minAgents}, mean agents: {meanAgents}, maximum agents: {maxAgents}")
fig: plt.Figure = plt.figure(figsize=(15, 8), dpi=300)
ax: plt.Axes = plt.gca()
ax.plot_date(idxs, mins, '-g', label="minimum")
ax.plot_date(idxs, means, '-y', label="avgerage")
ax.plot_date(idxs, maxs, '-r', label="maximum")
ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=0))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=10))
ax.set_xlim(datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=0, minute=0),
datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=23, minute=59), emit=False)
# removed for small sample based reproduction # ax.set_ylim(250,900)
fig.autofmt_xdate()
ax.legend()
plt.title(f"Minimum, average and maximum number of vehicles seamlessly reaching one vertex, per 10 minute timeslice")
plt.xlabel(f"time\nat {startTime.strftime('%d.%m.%y')}")
plt.ylabel("number of seamlessly reaching vehicles")
overview_df = | pd.DataFrame(df_data, columns=["idx", "min", "mean", "max"]) | pandas.DataFrame |
#!/srv/home/wconnell/anaconda3/envs/lightning
"""
Author: <NAME>
Date Initialized: 2021-09-09
Email: <EMAIL>
Script to evaluate new samples.
"""
###########################################################################################################################################
# # # # # # # # # # # # # # # # # #
# IMPORT MODULES
# # # # # # # # # # # # # # # # # #
###########################################################################################################################################
# I/O
from mmap import ACCESS_DEFAULT
import os
import sys
import argparse
from pathlib import Path
from typing import OrderedDict
import joblib
import pyarrow.feather as feather
from multiprocessing import Pool
# Data handling
import numpy as np
import pandas as pd
import torch
import pyarrow.dataset as ds
import optuna
from collections import OrderedDict
# Chem
from rdkit import DataStructs
from rdkit.Chem import MolFromSmiles
from rdkit.Chem import AllChem
# Transforms
from sklearn.preprocessing import StandardScaler
from scipy.optimize import curve_fit, OptimizeWarning
from scipy.integrate import quad
# Custom
from models import ConditionalNetwork
from train import read
###########################################################################################################################################
# # # # # # # # # # # # # # # # # #
# PRIMARY FUNCTIONS
# # # # # # # # # # # # # # # # # #
###########################################################################################################################################
def get_tx(tx_path, lm_genes):
# read
tx = pd.read_csv(tx_path, sep=" ", index_col="Entrez").dropna(subset=['HGNC']).drop(columns=['HGNC', 'GENEID'])
# add missing genes
miss_genes = lm_genes[~np.isin(lm_genes, tx.index)]
impute_df = pd.DataFrame(np.zeros((len(miss_genes), tx.shape[1])),
index=miss_genes,
columns=tx.columns)
tx = pd.concat((tx, impute_df))
tx = tx.loc[lm_genes].T
tx.columns = tx.columns.astype(int).astype(str)
return tx
def combine_features(cl, cpd):
if not cl.index.is_unique:
raise ValueError("Cell lines are not unique.")
samples = []
for line in cl.index:
line_df = cl.loc[np.repeat(line, len(cpd))].rename_axis('stripped_cell_line_name').reset_index().copy()
line_df = pd.DataFrame(data=np.concatenate((line_df, cpd), axis=1),
columns=list(line_df.columns) + list(cpd.columns))
samples.append(line_df)
return pd.concat(samples, ignore_index=True)
def generate_drc(cpd, min=1e-3, max=300, num=32):
drc = []
for c in cpd.index:
cpd_df = cpd.loc[np.repeat(c, num)].rename_axis('broad_cpd_id').reset_index()
cpd_df['cpd_conc_umol'] = np.geomspace(min, max, num)
drc.append(cpd_df)
return pd.concat(drc, ignore_index=True)
def parse_optimal_models(model_path):
trial_paths = []
for i in range(5):
study = optuna.load_study(f"FOLD_{i}", f"sqlite://///{str(model_path.resolve())}/FOLD_{i}/FOLD_{i}.db")
df = study.trials_dataframe()
df = df[df['number']<20]
number = df[df['value']==df['value'].max()]['number'].item()
path = list(model_path.joinpath(f"FOLD_{i}/model_logs/trial{number}_film_fold_{i}/checkpoints/").glob("*.ckpt"))
trial_paths.append(path[0])
return trial_paths
def create_eval_df(data_path, eval_path, custom_drc=True):
# load
gene_cols = joblib.load(data_path.joinpath("gene_cols.pkl"))
fp_cols = joblib.load(data_path.joinpath("fp_cols.pkl"))
mol_cols = np.append(fp_cols, ['cpd_conc_umol'])
if eval_path.joinpath(f'eval_data.tsv').exists():
eval_df = pd.read_csv(eval_path.joinpath(f'eval_data.tsv'), sep="\t")
else:
# retrieve training data compounds
data_ds = ds.dataset(data_path.joinpath('data.feather'), format='feather')
cols = list(fp_cols) + ['broad_cpd_id', 'cpd_conc_umol']
data_df = data_ds.to_table(columns=cols).to_pandas()
data_df = data_df.drop_duplicates(subset=['broad_cpd_id', 'cpd_conc_umol'])
if custom_drc:
data_df = data_df.drop_duplicates(subset=['broad_cpd_id']).set_index('broad_cpd_id')
data_df = generate_drc(data_df, min=1e-3, max=300, num=32)
# eval data
tx_files = ['HCC1806-gencode.v28-abundance.txt', 'MDA-gencode.v33-abundance.txt', 'SW480-gencode.v28-abundance.txt']
tx_df = pd.concat([
get_tx(eval_path.joinpath(f), gene_cols.astype(int)) for f in tx_files
])
# preprocess
tx_df = pd.DataFrame(StandardScaler().fit_transform(tx_df), columns=tx_df.columns, index=tx_df.index)
tx_df.index = tx_df.index.str.split("_", expand=True)
tx_df = tx_df.groupby(level=0).mean()
eval_df = combine_features(tx_df, data_df)
if not tx_df.shape[0] * data_df.shape[0] == eval_df.shape[0]:
raise ValueError("Incorrect feature concatenation.")
eval_df.to_csv(eval_path.joinpath(f'eval_data.tsv'), sep="\t")
return eval_df, gene_cols, mol_cols
def predict(model_ckpt, device, eval_df, gene_cols, mol_cols):
fold = model_ckpt.parents[3].stem.split("_")[1]
inputs = torch.FloatTensor(eval_df[gene_cols].to_numpy(dtype=np.float)).to(device)
conds = torch.FloatTensor(eval_df[mol_cols].to_numpy(dtype=np.float)).to(device)
preds = eval_df[['stripped_cell_line_name', 'broad_cpd_id', 'cpd_conc_umol']].copy()
model = ConditionalNetwork.load_from_checkpoint(model_ckpt,
map_location=None).to(device)
model.eval()
inputs_emb, conds_emb, y_hat = model(inputs, conds)
preds.loc[:,'cpd_pred_pv'] = y_hat.view(-1,1).detach().cpu().numpy()
preds['fold'] = fold
return preds
def combine(data_path, preds):
# retrieve training data compounds
data_ds = ds.dataset(data_path.joinpath('data.feather'), format='feather')
cols = ['stripped_cell_line_name', 'broad_cpd_id', 'cpd_conc_umol', 'cpd_pred_pv']
data_df = data_ds.to_table(columns=cols).to_pandas()
all_df = pd.concat([data_df, preds])
all_df['log_cpd_conc_umol'] = np.log(all_df['cpd_conc_umol'])
return all_df
def ll4(x, b, e, c, d):
'''
This function is basically a copy of the LL.4 function from the R drc package with
- b: hill slope
- e: EC50
- c: min response
- d: max response
src: https://gist.github.com/yannabraham/5f210fed773785d8b638
'''
return (c+(d-c)/(1+np.exp(b*(np.log(x)-np.log(e)))))
def ll3(x, b, e, c):
return ll4(x, b, e, c, d=1)
def ll2(x, b, e):
return ll4(x, b, e, c=0, d=1)
def IC50(b, e, c, d):
return np.exp( ( (np.log(d-0.5) - np.log(0.5-c)) / b + np.log(e)))
def ll3_ic50(b, e, c):
return IC50(b, e, c, d=1)
def ll2_ic50(b, e):
return IC50(b, e, c=0, d=1)
def fit_ll3(x, y):
"""Fit log-logistic function."""
p = 3
try:
popt, pcov = curve_fit(ll3, x, y, bounds=([-np.inf, -np.inf, 0], [np.inf, np.inf, 1]))
except (RuntimeError, OptimizeWarning) as error:
popt = tuple([np.nan] * p)
if not np.isnan(popt).any():
ic50 = ll3_ic50(*popt)
auc, err = quad(ll3, np.min(x), np.max(x), tuple(popt))
else:
ic50, auc = np.nan, np.nan
return popt, ic50, auc
def fit_ll2(x, y):
"""Fit log-logistic function."""
p = 2
try:
popt, pcov = curve_fit(ll2, x, y, bounds=([-np.inf, -np.inf], [np.inf, np.inf]))
except (RuntimeError, OptimizeWarning) as error:
popt = tuple([np.nan] * p)
if not np.isnan(popt).any():
ic50 = ll2_ic50(*popt)
auc, err = quad(ll2, np.min(x), np.max(x), tuple(popt))
else:
ic50, auc = np.nan, np.nan
return popt, ic50, auc
def qc_solubility(conc, pv):
"""Check if the last 2 maximum concentrations cause a viability increase of 20% or more back to baseline"""
qc_pass = True
max_conc_idx = np.argsort(conc)[-3:]
for i in range(1, 3):
if not pv[max_conc_idx[i]] - pv[max_conc_idx[0]] < 0.2:
qc_pass = False
return qc_pass
def qc_outliers(conc, pv):
"""Hueristic outlier detection based on sum of abosulte viability changes between sequential concentrations"""
qc_pass = True
if not np.sum(np.abs(np.diff(pv))) < len(conc) / 8:
qc_pass = False
return qc_pass
def fit_drc(group, data):
cols =['stripped_cell_line_name', 'broad_cpd_id', 'H', 'EC50', 'Emin', 'Emax', 'IC50', 'AUC', 'FUNC']
func = "none"
results = pd.DataFrame(data=[list(group) + [np.nan]*6 + [func]], columns=cols)
# QC checks
while not qc_solubility(data['cpd_conc_umol'].values, data['cpd_pred_pv'].values):
# remove the last value and check solubility again
data = data[data['cpd_conc_umol']<np.sort(data['cpd_conc_umol'].unique())[-1]]
if not qc_outliers(data['cpd_conc_umol'].values, data['cpd_pred_pv'].values):
cooks = True # TODO implement cook's distance outlier detection
if data['cpd_pred_pv'].min() > 0.4:
return results
# Fit
func = "ll3"
popt, ic50, auc = fit_ll3(data['cpd_conc_umol'].values, data['cpd_pred_pv'].values)
popt = list(popt) + [1]
if np.isnan(popt).any():
func = "ll2"
popt, ic50, auc = fit_ll2(data['cpd_conc_umol'].values, data['cpd_pred_pv'].values)
popt = list(popt) + [0, 1]
results = pd.DataFrame(data=[list(group) + list(popt) + [ic50, auc] + [func]], columns=cols)
return results
def process(data_path, eval_path, model_path):
map_location = ('cuda:0', 'cuda:1', 'cuda:2', 'cuda:3', 'cpu')
data = create_eval_df(data_path, eval_path)
trial_paths = parse_optimal_models(model_path)
if eval_path.joinpath(f"predictions.tsv").exists():
predictions = pd.read_csv(eval_path.joinpath(f"predictions.tsv"), sep="\t")
else:
predictions = pd.concat([predict(ckpt, device, *data) for ckpt, device in zip(trial_paths, map_location)])
predictions.to_csv(eval_path.joinpath(f"predictions.tsv"), sep="\t", index=False)
all_data = combine(data_path, predictions)
# filter negativ PV
all_data = all_data[all_data['cpd_pred_pv'] >= 0]
# add small eps to reduce log transform errors
all_data['cpd_pred_pv'] = all_data['cpd_pred_pv'] + 1e-32
# parallelize fitting logistic function
grp_data = all_data.groupby(['stripped_cell_line_name', 'broad_cpd_id'])
grp_data = [(grp, data.groupby(['cpd_conc_umol']).mean().reset_index().sort_values(by='cpd_conc_umol')) for grp,data in grp_data]
pool = Pool()
results = pool.starmap(fit_drc, grp_data)
results = | pd.concat(results) | pandas.concat |
import wget
import os
import zipfile
import shutil
from io import BytesIO
import requests
from zipfile import ZipFile
import pandas as pd
from itertools import product
from unidecode import unidecode
from covidmx.utils import translate_serendipia
from covidmx.dge_plot import DGEPlot
pd.options.mode.chained_assignment = None
URL_DATA = 'https://github.com/FedericoGarza/covidmx-data/releases/download/v0.0.0.9000/datos_abiertos_covid19.zip'
URL_DESCRIPTION = 'http://172.16.17.32/gobmx/salud/datos_abiertos/diccionario_datos_covid19.zip'
URL_HISTORICAL = 'http://172.16.17.32/gobmx/salud/datos_abiertos/historicos/datos_abiertos_covid19_{}.zip'
class DGE:
def __init__(
self,
clean=True,
return_catalogo=False,
return_descripcion=False,
date=None,
date_format='%d-%m-%Y',
data_path=None):
"""
Returns COVID19 data from the Direccion General de Epidemiología
"""
self.clean = clean
self.return_catalogo = return_catalogo
self.return_descripcion = return_descripcion
self.data_path = data_path
self.date = date
if date is not None:
self.date = | pd.to_datetime(date, format=date_format) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 24 11:44:50 2019
@author: Julia
"""
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
from collections import defaultdict
from pyteomics import mass #parser, pepxml, mgf, mzml,
from pyteomics import electrochem as ec
#from scipy.spatial import cKDTree
from math import factorial
#from copy import copy
try:
from pyteomics import cmass
except ImportError:
cmass = mass
DIFF_C13 = mass.calculate_mass(formula='C[13]') - mass.calculate_mass(formula='C')
def get_theor_spectrum(peptide, acc_frag, types=('b', 'y'), maxcharge=None, **kwargs ):
"""
Calculates theoretical spectra in two ways: usual one. and formatter in integer (mz / frag_acc).
`peptide` -peptide sequence
`acc_frag` - accuracy of matching.
`types` - ion types.
`maxcharge` - maximum charge.
----------
Returns spectra in two ways (usual, integer)
"""
peaks = {}
theoretical_set = defaultdict(set)
pl = len(peptide) - 1
if not maxcharge:
maxcharge = 1 + int(ec.charge(peptide, pH=2))
for charge in range(1, maxcharge + 1):
for ion_type in types:
nterminal = ion_type[0] in 'abc'
if nterminal:
maxpart = peptide[:-1]
maxmass = cmass.fast_mass(maxpart, ion_type=ion_type, charge=charge, **kwargs)
marr = np.zeros((pl, ), dtype=float)
marr[0] = maxmass
for i in range(1, pl):
marr[i] = marr[i-1] - mass.fast_mass2([maxpart[-i]])/charge ### recalculate
else:
maxpart = peptide[1:]
maxmass = cmass.fast_mass(maxpart, ion_type=ion_type, charge=charge, **kwargs)
marr = np.zeros((pl, ), dtype=float)
marr[pl-1] = maxmass
for i in range(pl-2, -1, -1):
marr[i] = marr[i+1] - mass.fast_mass2([maxpart[-(i+2)]])/charge ### recalculate
tmp = marr / acc_frag
tmp = tmp.astype(int)
theoretical_set[ion_type].update(tmp)
marr.sort()
peaks[ion_type, charge] = marr
return peaks, theoretical_set
def RNHS_fast(spectrum_idict, theoretical_set, min_matched):
"""
Matches expetimental and theoretical spectra.
`spectrum_idict` - mass in int format (real mz / fragment accuracy)
`theoretical_set` -output of get_theor_spec, dict where keys is ion type, values
masses in int format.
`min_matched` - minumum peaks matched.
---------
Return score
"""
isum = 0
matched_approx_b, matched_approx_y = 0, 0
for ion in theoretical_set['b']:
if ion in spectrum_idict:
matched_approx_b += 1
isum += spectrum_idict[ion]
for ion in theoretical_set['y']:
if ion in spectrum_idict:
matched_approx_y += 1
isum += spectrum_idict[ion]
matched_approx = matched_approx_b + matched_approx_y
if matched_approx >= min_matched:
return matched_approx, factorial(matched_approx_b) * factorial(matched_approx_y) * isum
else:
return 0, 0
def peptide_isoforms(sequence, localizations, sum_mod=False):
"""
Forms list of modified amino acid candidates.
`variable_mods` - dict of modification's name (key) and amino acids (values)
Return list of isoforms [isoform1,isoform2]
"""
if sum_mod:
loc_ = set(localizations[0])
loc_1 = set(localizations[1])
loc_2 = set(localizations[2])
sum_seq_1 = []
isoforms = []
for i,j in enumerate(sequence):
if j in loc_1:
sum_seq_1.append(''.join([sequence[:i],'n', sequence[i:]]))
for s in sum_seq_1:
new_s = ''.join(['0', s, '0'])
for i,j in enumerate(new_s[1:-1], 1):
if j in loc_2 and new_s[i-1] !='n':
isoforms.append(''.join([new_s[1:i],'k', new_s[i:-1]]))
else:
loc_ = set(localizations)
isoforms = []
if 'N-term' in loc_:
isoforms.append(''.join(['m', sequence]))
if 'C-term' in loc_:
isoforms.append(''.join([sequence[:-1],'m', sequence[-1] ]))
for i,j in enumerate(sequence): #format='split'
if j in loc_:
isoforms.append(''.join([sequence[:i],'m', sequence[i:]]))
#[''.join(i) for i in j]
return set(isoforms)
def get_candidates_from_unimod(mass_shift, tolerance, unimod_db, unimod_df):
"""
Find modifications for `mass_shift` in Unimod.org database with a given `tolerance`.
Returns dict. {'modification name': [list of amino acids]}
"""
ind = list(unimod_df[abs(unimod_df['mono_mass']-mass_shift) < tolerance].index)
sites_set = set()
for i in unimod_db[ind]:
sites_set.update(set(pd.DataFrame(i['specificity']).site))
return list(sites_set)
def get_candidates_from_aastat(mass_shifts_table, labels, threshold = 1.5,):
df = mass_shifts_table.loc[:,labels]
ms, aa = np.where(df > threshold)
out = {ms:[] for ms in mass_shifts_table.index}
for i,j in zip(ms, aa):
out[df.index[i]].append(df.columns[j])
return pd.Series(out)
def find_isotopes(ms, tolerance=0.01):
"""
Find the isotopes from the `mass_shift_list` using mass difference of C13 and C12, information of amino acids statistics as well.
`locmod_ms` - Series there index in mass in str format, values actual mass shift.
-----------
Returns Series of boolean.
"""
out = | pd.DataFrame({'isotope':False, 'monoisotop_index': False}, index=ms.index) | pandas.DataFrame |
import argparse
import json
import os
import pandas as pd
import requests
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-i', "--infile", action='store',
help="""Path to .txt file containing accessions of experiments to process or list of accessions separated by commas. The txt file must contain two columns with 1 header row, one labeled 'accession' and another labeled 'align_only'. It can optionally include a 3rd column for 'custom_message'.""")
parser.add_argument('-o', '--outputpath', action='store', default='',
help="""Optional path to output folder. Defaults to current path.""")
parser.add_argument('-g', '--gcpath', action='store', default='',
help="""Optional path where the input.json will be uploaded to the Google Cloud instance. Only affects the list of caper commands that is generated.""")
parser.add_argument('--wdl', action='store', default=False,
help="""Path to .wdl file.""")
parser.add_argument('-s', '--server', action='store', default='https://www.encodeproject.org',
help="""Optional specification of server using the full URL. Defaults to production server.""")
parser.add_argument('--use-s3-uris', action='store_true', default=False,
help="""Optional flag to use s3_uri links. Otherwise, defaults to using @@download links from the ENCODE portal.""")
input_group.add_argument("--accessions", action='store',
help="""List of accessions separated by commas.""")
parser.add_argument('--custom-message', action='store',
help="""An additional custom string to be appended to the messages in the caper submit commands.""")
parser.add_argument('--caper-commands-file-message', action='store', default='',
help="""An additional custom string to be appended to the file name of the caper submit commands.""")
return parser
def check_path_trailing_slash(path):
if path.endswith('/'):
return path.rstrip('/')
else:
return path
def build_experiment_report_query(experiment_list, server):
joined_list = '&accession='.join(experiment_list)
return server + '/report/?type=Experiment' + \
f'&accession={joined_list}' + \
'&field=@id' + \
'&field=accession' + \
'&field=files.s3_uri' + \
'&field=files.href' + \
'&field=replicates.library.biosample.organism.scientific_name' + \
'&limit=all' + \
'&format=json'
def build_file_report_query(experiment_list, server):
joined_list = '&dataset='.join(experiment_list)
return server + '/report/?type=File' + \
f'&dataset={joined_list}' + \
'&status=released' + \
'&status=in+progress' + \
'&file_format=fastq' + \
'&output_type=reads' + \
'&field=@id' + \
'&field=dataset' + \
'&field=biological_replicates' + \
'&field=replicate.library.adapters' + \
'&field=paired_end' + \
'&field=paired_with' + \
'&field=run_type' + \
'&field=read_length' + \
'&field=status' + \
'&field=s3_uri' + \
'&field=href' + \
'&field=replicate.status' + \
'&limit=all' + \
'&format=json'
def parse_infile(infile):
try:
infile_df = | pd.read_csv(infile, '\t') | pandas.read_csv |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = | pd.DataFrame(vals, index=widx, columns=["CL1"]) | pandas.DataFrame |
#**************************************************************************************#
# Project: River Node
# Authors: <NAME>
# Department: CIDSE
# Semester: Fall 2016/Spring 2017
# Course Number and Name: CSE 492/493 Honors Thesis
# Supervisors: Dr. <NAME> & Dr. <NAME>
#**************************************************************************************#
# STANDARD LIBRARIES
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
# MY FILES
from data_calc import *
from list_conversions import *
from map_data import map_points
dataList=[]
#**************************************************************************************#
# Functions #
#**************************************************************************************#
def update_data():
"""
Description:
Updates the data list as well as the feed data.
"""
global feed_data
feed_data = pd.read_json('https://io.adafruit.com/api/v2/specialKody/feeds/river-node-location-ph/data')
feed_data['created_at'] = pd.to_datetime(feed_data['created_at'], infer_datetime_format=True)
#This removes the unused data columns
feed_data.drop(feed_data.columns[[0,2,4,5,6,9,11]], axis=1, inplace=True)
lat = feed_data['lat']
lon = feed_data['lon']
dist = calculated_distance(lat,lon)
speedSeries = list_to_series(calculate_speeds(feed_data['created_at'], dist))
global dataList
dataList= [lat, lon, feed_data['ele'], feed_data['value'], feed_data['created_at'], speedSeries]
def map_ph(high_contrast):
"""
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Maps the pH values on the Basemap map through the map_points function call.
"""
map_points((dataList[1]).tolist(), (dataList[0]).tolist(), (pd.Series(dataList[3])).tolist(), high_contrast)
def elev_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the elevation line plot after pulling new data.
"""
plt.cla()
update_data()
elevation = pd.Series(dataList[2])
if(high_contrast):
line = ax.plot(elevation, linewidth=3.0)
else:
line = ax.plot(elevation)
return line
def plot_elev(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This functions plots the elevation data.
"""
if(real_time):
elevation = pd.Series(dataList[2])
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node Elevation")
ax.set_ylabel("Elevation (Meters)")
ax.set_xlabel("Measurment")
line = ax.plot(elevation)
ani = animation.FuncAnimation(fig, elev_update, interval=1000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node Elevation")
elevation = pd.Series(dataList[2])
if(high_contrast == 1):
elevation.plot(linewidth=3.0)
else:
elevation.plot()
plt.ylabel("Elevation (Meters)")
plt.xlabel("Measurment")
plt.show()
def ph_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the ph line plot after pulling new data.
"""
plt.cla()
update_data()
values = pd.Series(dataList[3])
if(high_contrast):
line = ax.plot(values, linewidth=3.0)
else:
line = ax.plot(values)
return line
def plot_ph(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This function plots the PH data. The PH data is stored as 'value' by the Adafruit IOT website.
"""
if(real_time):
values = pd.Series(dataList[3])
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node pH Recordings")
ax.set_ylabel("PH")
ax.set_xlabel("Measurment")
line = ax.plot(values)
ani = animation.FuncAnimation(fig, ph_update, interval=20000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node PH Recordings")
values = pd.Series(dataList[3])
if(high_contrast == 1):
values.plot(linewidth=3.0)
else:
values.plot()
plt.ylabel("PH")
plt.xlabel("Measurment")
plt.show()
def speed_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the speed line plot after pulling new data.
"""
plt.cla()
update_data()
speed = dataList[5]
if(high_contrast):
line = ax.plot(speed, linewidth=3.0)
else:
line = ax.plot(speed)
return line
def plot_speed(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This function plots the calculated speed values. This requires a call to the list_to_series fucntion and the
calculate_speeds function.
"""
if(real_time):
speed = dataList[5]
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node Speed")
ax.set_ylabel("Speed (Meters/Second)")
ax.set_xlabel("Measurment")
line = ax.plot(speed)
ani = animation.FuncAnimation(fig, speed_update, interval=20000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node Speed")
speedSeries = dataList[5]
if(high_contrast == 1):
speedSeries.plot(linewidth=3.0)
else:
speedSeries.plot()
plt.ylabel("Speed (Meters/Second)")
plt.xlabel("Measurment")
plt.show()
def export_data():
"""
Description:
Exports the feed data to a text file. This feed data has unused columns trimmed.
"""
global feed_data
a = feed_data.to_string(buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False)
f = open('dashboard_export.txt', 'w')
f.write(a)
def elev_update_nr(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the elevation plot without updating data.
"""
plt.cla()
elevation = pd.Series(dataList[2])
if(high_contrast):
line = ax.plot(elevation, linewidth=3.0)
else:
line = ax.plot(elevation)
return line
def speed_update_nr(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the speed plot without updating data.
"""
plt.cla()
speed = dataList[5]
if(high_contrast):
line = ax.plot(speed, linewidth=3.0)
else:
line = ax.plot(speed)
return line
def plot_combined(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This function places all three line plots (pH, elevation, speed) on a single window one above another.
This is a seperate button as the stacking creates smaller graphing windows.
"""
if(real_time):
elevation = pd.Series(dataList[2])
values = pd.Series(dataList[3])
speed = dataList[5]
fig,(ax1, ax2, ax3) = plt.subplots(3, sharex=False, sharey=False)
fig.canvas.set_window_title("Combined Plots")
ax1.set_title("Node pH Recordings")
ax2.set_title("Node Elevation")
ax3.set_title("Node Speed")
ax1.set_ylabel("pH")
ax1.set_xlabel("Measurment")
ax2.set_ylabel("Meters")
ax2.set_xlabel("Measurment")
ax3.set_ylabel("Meters/Second")
ax3.set_xlabel("Measurment")
line1 = ax1.plot(values)
line2 = ax2.plot(elevation)
line3 = ax3.plot(speed)
ani1 = animation.FuncAnimation(fig, ph_update, interval=20000, fargs=(line1, ax1, high_contrast), blit=True)
ani2 = animation.FuncAnimation(fig, elev_update_nr, interval=20000, fargs=(line2, ax2, high_contrast), blit=True)
ani3 = animation.FuncAnimation(fig, speed_update_nr, interval=20000, fargs=(line3, ax3, high_contrast), blit=True)
else:
fig,(ax1, ax2, ax3) = plt.subplots(3, sharex=False, sharey=False)
values = | pd.Series(dataList[3]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import requests
import pandas as pd
import json
from tqdm import tqdm
PATH = '../../'
PATH_STATS = "../../data/france/stats/"
# In[5]:
# Download data from Santé publique France and export it to local files
def download_data_hosp_fra_clage():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/08c18e08-6780-452d-9b8c-ae244ad529b3")
with open(PATH + 'data/france/donnees-hosp-fra-clage.csv', 'wb') as f:
f.write(data.content)
def download_data_opencovid():
data = requests.get("https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv")
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data.content)
def download_data_vue_ensemble():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71")
with open(PATH + 'data/france/synthese-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_variants():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/848debc4-0e42-4e3b-a176-afc285ed5401") #https://www.data.gouv.fr/fr/datasets/r/c43d7f3f-c9f5-436b-9b26-728f80e0fd52
data_reg = requests.get("https://www.data.gouv.fr/fr/datasets/r/5ff0cad6-f150-47ea-a4e0-57e354c1b2a4") #https://www.data.gouv.fr/fr/datasets/r/73e8851a-d851-43f8-89e4-6178b35b7127
with open(PATH + 'data/france/donnees-variants.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnees-variants-reg.csv', 'wb') as f:
f.write(data.content)
def download_data_variants_deps():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4d3e5a8b-9649-4c41-86ec-5420eb6b530c") #https://www.data.gouv.fr/fr/datasets/r/16f4fd03-797f-4616-bca9-78ff212d06e8
with open(PATH + 'data/france/donnees-variants-deps.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_fra():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/efe23314-67c4-45d3-89a2-3faef82fae90")
with open(PATH + 'data/france/donnees-vacsi-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_reg():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/735b0df8-51b4-4dd2-8a2d-8e46d77d60d8")
with open(PATH + 'data/france/donnees-vacsi-reg.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_dep():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4f39ec91-80d7-4602-befb-4b522804c0af")
with open(PATH + 'data/france/donnees-vacsi-dep.csv', 'wb') as f:
f.write(data.content)
def download_data_obepine():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/031b79a4-5ee1-4f40-a804-b8abec3e99a6") #https://www.data.gouv.fr/fr/datasets/r/ba71be57-5932-4298-81ea-aff3a12a440c
with open(PATH + 'data/france/donnees_obepine_regions.csv', 'wb') as f:
f.write(data.content)
def download_data_donnees_vaccination_par_pathologie():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-pathologie/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', 'wb') as f:
f.write(data.content)
def import_data_donnees_vaccination_par_pathologie():
df = pd.read_csv(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', sep=None)
return df
def download_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-tranche-dage-type-de-vaccin-et-departement/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-tranche-dage-departement.csv', 'wb') as f:
f.write(data.content)
def import_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
df = pd.read_csv(PATH + 'data/france/donnees-tranche-dage-departement.csv', sep=None)
return df
def import_data_obepine():
df = pd.read_csv(PATH + 'data/france/donnees_obepine_regions.csv', sep=None)
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df = df.merge(right=df_reg_pop, left_on="Code_Region", right_on="code")
return df
def import_data_metropoles():
df_metro = pd.read_csv(PATH + 'data/france/donnes-incidence-metropoles.csv', sep=",")
epci = pd.read_csv(PATH + 'data/france/metropole-epci.csv', sep=";", encoding="'windows-1252'")
df_metro = df_metro.merge(epci, left_on='epci2020', right_on='EPCI').drop(['EPCI'], axis=1)
return df_metro
def import_data_hosp_clage():
df_hosp = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', sep=";")
df_hosp = df_hosp.groupby(["reg", "jour", "cl_age90"]).first().reset_index()
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_hosp = df_hosp.merge(df_reg_pop, left_on="reg", right_on="code")
return df_hosp
def import_data_tests_viros():
df = pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";")
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_dep_reg = pd.read_csv(PATH + 'data/france/departments_regions_france_2016.csv', sep=",")
df["dep"] = df["dep"].astype(str)
df["dep"] = df["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_dep_reg["departmentCode.astype"] = df_dep_reg.departmentCode.astype(str)
df = df.merge(df_dep_reg, left_on="dep", right_on="departmentCode", how="left")
df = df.merge(df_reg_pop, left_on="regionCode", right_on="code", how="left")
return df
def import_data_hosp_ad_age():
df = pd.read_csv('https://www.data.gouv.fr/fr/datasets/r/dc7663c7-5da9-4765-a98b-ba4bc9de9079', sep=";")
return df
def import_data_new():
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
return df_new
def import_data_df():
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
return df
def import_data_variants():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_deps():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-deps.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_regs():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-regs.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
df_variants = df_variants[df_variants.cl_age90==0]
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_variants = df_variants.merge(df_reg_pop, left_on="reg", right_on="code")
return df_variants
def import_data_tests_sexe():
df = pd.read_csv(PATH + 'data/france/tests_viro-fra-covid19.csv', sep=";")
return df
def import_data_vue_ensemble():
df = pd.read_csv(PATH + 'data/france/synthese-fra.csv', sep=",")
df = df.sort_values(["date"])
with open(PATH_STATS + 'vue-ensemble.json', 'w') as outfile:
dict_data = {"cas": int(df["total_cas_confirmes"].diff().values[-1]), "update": df.date.values[-1][-2:] + "/" + df.date.values[-1][-5:-3]}
json.dump(dict_data, outfile)
return df
def import_data_opencovid():
df = pd.read_csv(PATH + 'data/france/donnees-opencovid.csv', sep=",")
"""with open(PATH_STATS + 'opencovid.json', 'w') as outfile:
dict_data = {"cas": int(df["cas_confirmes"].values[-1]), "update": df.index.values[-1][-2:] + "/" + df.index.values[-1][-5:-3]}
json.dump(dict_data, outfile)"""
return df
def import_data_vacsi_a_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-fra.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-reg.csv', sep=";")
return df
def import_data_vacsi_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-dep.csv', sep=";")
return df
def import_data_vacsi_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-fra.csv', sep=";")
return df
def import_data_vacsi_a_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-reg.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_a_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-dep.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_hosp_fra_clage():
df = pd.read_csv(PATH + 'data/france/donnees-hosp-fra-clage.csv', sep=";").groupby(["cl_age90", "jour"]).sum().reset_index()
df = df[df.cl_age90 != 0]
return df
def download_data():
pbar = tqdm(total=8)
download_data_vacsi_fra()
download_data_vacsi_reg()
download_data_vacsi_dep()
url_metadata = "https://www.data.gouv.fr/fr/organizations/sante-publique-france/datasets-resources.csv"
url_geojson = "https://raw.githubusercontent.com/gregoiredavid/france-geojson/master/departements.geojson"
url_deconf = "https://www.data.gouv.fr/fr/datasets/r/f2d0f955-f9c4-43a8-b588-a03733a38921"
url_opencovid = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv"
url_vacsi_a_fra = "https://www.data.gouv.fr/fr/datasets/r/54dd5f8d-1e2e-4ccb-8fb8-eac68245befd"
url_vacsi_a_reg = "https://www.data.gouv.fr/fr/datasets/r/c3ccc72a-a945-494b-b98d-09f48aa25337"
url_vacsi_a_dep = "https://www.data.gouv.fr/fr/datasets/r/83cbbdb9-23cb-455e-8231-69fc25d58111"
pbar.update(1)
metadata = requests.get(url_metadata)
pbar.update(2)
geojson = requests.get(url_geojson)
pbar.update(3)
with open(PATH + 'data/france/metadata.csv', 'wb') as f:
f.write(metadata.content)
pbar.update(4)
with open(PATH + 'data/france/dep.geojson', 'wb') as f:
f.write(geojson.content)
pbar.update(5)
df_metadata = pd.read_csv(PATH + 'data/france/metadata.csv', sep=";")
url_data = "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-covid19")]["url"].values[0] #donnees-hospitalieres-classe-age-covid19-2020-10-14-19h00.csv
url_data_new = "https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-nouveaux")]["url"].values[0]
url_tests = df_metadata[df_metadata['url'].str.contains("/donnees-tests-covid19-labo-quotidien")]["url"].values[0]
url_metropoles = "https://www.data.gouv.fr/fr/datasets/r/61533034-0f2f-4b16-9a6d-28ffabb33a02" #df_metadata[df_metadata['url'].str.contains("/sg-metro-opendata")]["url"].max()
url_incidence = df_metadata[df_metadata['url'].str.contains("/sp-pe-tb-quot")]["url"].values[0]
url_tests_viro = df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-dep")]["url"].values[0]
url_sursaud = df_metadata[df_metadata['url'].str.contains("sursaud.*quot.*dep")]["url"].values[0]
url_data_clage = df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-classe-age-covid19")]["url"].values[0]
url_data_sexe = "https://www.data.gouv.fr/fr/datasets/r/dd0de5d9-b5a5-4503-930a-7b08dc0adc7c" #df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-fra")]["url"].values[0]
pbar.update(6)
data = requests.get(url_data)
data_new = requests.get(url_data_new)
data_tests = requests.get(url_tests)
data_metropoles = requests.get(url_metropoles)
data_deconf = requests.get(url_deconf)
data_sursaud = requests.get(url_sursaud)
data_incidence = requests.get(url_incidence)
data_opencovid = requests.get(url_opencovid)
data_vacsi_a_fra = requests.get(url_vacsi_a_fra)
data_vacsi_a_reg = requests.get(url_vacsi_a_reg)
data_vacsi_a_dep = requests.get(url_vacsi_a_dep)
data_tests_viro = requests.get(url_tests_viro)
data_clage = requests.get(url_data_clage)
data_sexe = requests.get(url_data_sexe)
pbar.update(7)
with open(PATH + 'data/france/donnes-hospitalieres-covid19.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', 'wb') as f:
f.write(data_new.content)
with open(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', 'wb') as f:
f.write(data_tests.content)
with open(PATH + 'data/france/donnes-incidence-metropoles.csv', 'wb') as f:
f.write(data_metropoles.content)
with open(PATH + 'data/france/indicateurs-deconf.csv', 'wb') as f:
f.write(data_deconf.content)
with open(PATH + 'data/france/sursaud-covid19-departement.csv', 'wb') as f:
f.write(data_sursaud.content)
with open(PATH + 'data/france/taux-incidence-dep-quot.csv', 'wb') as f:
f.write(data_incidence.content)
with open(PATH + 'data/france/tests_viro-dep-quot.csv', 'wb') as f:
f.write(data_tests_viro.content)
with open(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', 'wb') as f:
f.write(data_clage.content)
with open(PATH + 'data/france/tests_viro-fra-covid19.csv', 'wb') as f:
f.write(data_sexe.content)
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data_opencovid.content)
with open(PATH + 'data/france/donnees-vacsi-a-fra.csv', 'wb') as f:
f.write(data_vacsi_a_fra.content)
with open(PATH + 'data/france/donnees-vacsi-a-reg.csv', 'wb') as f:
f.write(data_vacsi_a_reg.content)
with open(PATH + 'data/france/donnees-vacsi-a-dep.csv', 'wb') as f:
f.write(data_vacsi_a_dep.content)
pbar.update(8)
# Import data from previously exported files to dataframes
def import_data():
pbar = tqdm(total=8)
pbar.update(1)
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
df.dep = df.dep.astype(str)
df_sursaud = pd.read_csv(PATH + 'data/france/sursaud-covid19-departement.csv', sep=";")
df_sursaud["dep"] = df_sursaud["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
df_tests = pd.read_csv(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', sep=";")
df_deconf = pd.read_csv(PATH + 'data/france/indicateurs-deconf.csv', sep=",")
df_incid = pd.read_csv(PATH + 'data/france/taux-incidence-dep-quot.csv', sep=";")
df_incid["dep"] = df_incid["dep"].astype('str')
df_incid["dep"] = df_incid["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_tests_viro = | pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";") | pandas.read_csv |
"""
Experiment 1: swarm tec correlation
- for various background estimation sizes and artifact keys:
- collect random days
- get dtec prof
- interpolate swarm dne at the profile points
- estimate mean and covariance between the two
"""
import numpy as np
import pandas
from ttools import io, rbf_inversion, swarm, utils, config, convert
LW = 9
def run_experiment(n, bg_est_shape, artifact_key):
start_date = np.datetime64("2014-01-01")
end_date = np.datetime64("2020-01-01")
time_range_days = (end_date - start_date).astype('timedelta64[D]').astype(int)
offsets = np.random.randint(0, time_range_days, n)
dates = start_date + offsets.astype('timedelta64[D]')
x = []
dne = []
mlat_x = []
mlat_dne = []
for date in dates:
_x, _dne, _mlat_x, _mlat_dne = run_day(date, bg_est_shape, artifact_key)
x += _x
dne += _dne
mlat_x += _mlat_x
mlat_dne += _mlat_dne
x = np.concatenate(x, axis=0)
dne = np.concatenate(dne, axis=0)
mlat_x = np.array(mlat_x)
mlat_dne = np.array(mlat_dne)
data = np.column_stack((x, dne))
mean = np.nanmean(data, axis=0)
cov = pandas.DataFrame(data=data).corr().values
mlat_data = np.column_stack((mlat_x, mlat_dne))
mlat_mean = np.nanmean(mlat_data, axis=0)
mlat_cov = pandas.DataFrame(data=mlat_data).cov().values
return mean, cov, mlat_mean, mlat_cov
def run_day(date, bg_est_shape, artifact_key):
print(f"Running {date}")
one_h = np.timedelta64(1, 'h')
start_time = date.astype('datetime64[D]').astype('datetime64[s]')
end_time = start_time + np.timedelta64(1, 'D')
comparison_times = np.arange(start_time, end_time, one_h)
swarm_segments = swarm.get_segments_data(comparison_times)
swarm_troughs = swarm.get_swarm_troughs(swarm_segments)
tec_start = comparison_times[0] - np.floor(bg_est_shape[0] / 2) * one_h
tec_end = comparison_times[-1] + (np.floor(bg_est_shape[0] / 2) + 1) * one_h
tec, times, ssmlon, _ = io.get_tec_data(tec_start, tec_end)
x, times = rbf_inversion.preprocess_interval(tec, times, bg_est_shape=bg_est_shape)
utils.moving_func_trim(bg_est_shape[0], ssmlon)
if artifact_key is not None:
x -= rbf_inversion.get_artifacts(ssmlon, artifact_key)
data_grids = [config.mlat_grid, x[swarm_troughs['tec_ind']]]
mlat_profs, x_profs = utils.get_grid_slice_line(swarm_troughs['seg_e1_mlt'], swarm_troughs['seg_e1_mlat'],
swarm_troughs['seg_e2_mlt'], swarm_troughs['seg_e2_mlat'],
data_grids, config.mlt_grid, config.mlat_grid, linewidth=LW)
t1 = swarm_troughs['seg_e1_mlt'].values * np.pi / 12
t2 = swarm_troughs['seg_e2_mlt'].values * np.pi / 12
seg_mlt = utils.average_angles(t1, t2) * 12 / np.pi
mlon = convert.mlt_to_mlon_sub(seg_mlt, ssmlon[swarm_troughs['tec_ind']])
mlon_mask = ~((mlon >= 130) & (mlon <= 260))
x_list = []
dne_list = []
swarm_min_mlat = []
tec_min_mlat = []
for i, row in swarm_troughs[mlon_mask].iterrows():
segment = swarm_segments[row['sat']][row['direction']][row['tec_ind']]
idx = np.argsort(segment['mlat'])
smooth_dne = np.interp(mlat_profs[i], segment['mlat'][idx], segment['smooth_dne'][idx])
x_list.append(x_profs[i])
dne_list.append(smooth_dne)
if row['trough'] and np.isnan(x_profs[i]).mean() < .75:
swarm_min_mlat.append(row['min_mlat'])
tec_min_mlat.append(mlat_profs[i][np.nanargmin(x_profs[i])])
return x_list, dne_list, tec_min_mlat, swarm_min_mlat
if __name__ == "__main__":
import itertools
import matplotlib.pyplot as plt
bg_sizes = [13, 15, 17, 19, 21]
artifact_keys = [None, '3', '5', '7', '9']
data = []
mlat_data = []
n = 100
for i, (bg_size, artifact_key) in enumerate(itertools.product(bg_sizes, artifact_keys)):
print(bg_size, artifact_key)
mean, cov, mlat_mean, mlat_cov = run_experiment(n, (1, bg_size, bg_size), artifact_key)
data.append({'m0': mean[0], 'm1': mean[1], 's00': cov[0, 0], 's01': cov[0, 1], 's11': cov[1, 1],
'bg_size': bg_size, 'artifact_key': artifact_key})
mlat_data.append({'m0': mlat_mean[0], 'm1': mlat_mean[1], 's00': mlat_cov[0, 0], 's01': mlat_cov[0, 1],
's11': mlat_cov[1, 1], 'bg_size': bg_size, 'artifact_key': artifact_key})
data = | pandas.DataFrame(data) | pandas.DataFrame |
import unittest
import pandas as pd
import numpy as np
from khayyam import JalaliDate
from datetime import timedelta
from pandas_jalali.converter import get_gregorian_date_from_jalali_date, validate_jalali_date
class TestConverter(unittest.TestCase):
def setUp(self):
dt = JalaliDate(1346, 12, 30)
dt_jalali_y = []
dt_jalali_m = []
dt_jalali_d = []
dt_gregorian_y = []
dt_gregorian_m = []
dt_gregorian_d = []
for t in range(1, 10000):
dt += timedelta(days=1)
dt_jalali_y.append(dt.year)
dt_jalali_m.append(dt.month)
dt_jalali_d.append(dt.day)
gregorian = dt.todate()
dt_gregorian_y.append(gregorian.year)
dt_gregorian_m.append(gregorian.month)
dt_gregorian_d.append(gregorian.day)
self.dt_jalali_y = pd.Series(dt_jalali_y)
self.dt_jalali_m = pd.Series(dt_jalali_m)
self.dt_jalali_d = pd.Series(dt_jalali_d)
self.dt_gregorian_y = pd.Series(dt_gregorian_y)
self.dt_gregorian_m = pd.Series(dt_gregorian_m)
self.dt_gregorian_d = pd.Series(dt_gregorian_d)
def test_get_gregorian_date_from_jalali_date(self):
y, m, d = get_gregorian_date_from_jalali_date(
self.dt_jalali_y,
self.dt_jalali_m,
self.dt_jalali_d
)
self.assertTrue(y.equals(self.dt_gregorian_y.astype(float)))
self.assertTrue(m.equals(self.dt_gregorian_m.astype(float)))
self.assertTrue(d.equals(self.dt_gregorian_d.astype(float)))
def test_validate_jalali_date(self):
dt_jalali_y = pd.Series([4178, 1346, 1346, None, None, 1346])
dt_jalali_m = pd.Series([1, 1, 23, None, 1, 1])
dt_jalali_d = pd.Series([1, 34, 1, None, 1, 1])
y, m, d = validate_jalali_date(
dt_jalali_y,
dt_jalali_m,
dt_jalali_d
)
self.assertTrue(pd.Series(y).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1346])))
self.assertTrue(pd.Series(m).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1])))
self.assertTrue(pd.Series(d).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1])))
def test_invalid_date_convertation(self):
dt_jalali_y = pd.Series([np.NaN, 1346])
dt_jalali_m = | pd.Series([np.NaN, 1]) | pandas.Series |
import os
import argparse
import warnings
import openslide
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from functools import partial
from joblib import Parallel, delayed
def tile(df_slide,
case_id,
path_img,
test,
cv,
path_out_base,
tile_size,
mpp_target):
slide_filename = os.path.basename(path_img)
slide_filename = os.path.splitext(slide_filename)[0]
path_out = os.path.join(path_out_base, slide_filename)
# Initialize the OpenSlide image
slide = openslide.OpenSlide(path_img)
mpp_source_base = float(slide.properties['openslide.mpp-x'])
if slide.properties['openslide.mpp-x'] != slide.properties['openslide.mpp-y']:
warnings.warn('Warning: Asymmetric pixel scaling...')
scaling_base = mpp_target / mpp_source_base
# Compute scaling at next-clostest resolution level
scaling_seq = [x for x in slide.level_downsamples if x <= scaling_base]
if scaling_seq:
scaling_tmp = max(scaling_seq)
resolution_level = slide.level_downsamples.index(scaling_tmp)
else:
resolution_level = 0
scaling_tmp = slide.level_downsamples[resolution_level]
mpp_source = mpp_source_base * slide.level_downsamples[resolution_level]
scaling = mpp_target / mpp_source
# Figure out the final tile size.
tile_size_source = np.round(tile_size*scaling).astype(int)
# Loop over all coordinates.
data = list()
for _, row in df_slide.iterrows():
h_start = row['top']
w_start = row['left']
h_end = row['bottom']
w_end = row['right']
tile_filename = slide_filename + '_' + str(h_start) + '_' + str(h_end) \
+ '_' + str(w_start) + '_' + str(w_end) + '.jpg'
# Write tiles to disk
if not os.path.exists(path_out):
os.makedirs(path_out)
tile = slide.read_region(location=(w_start, h_start),
level=resolution_level,
size=(tile_size_source, tile_size_source))
tile = tile.convert('RGB')
if tile.width != tile_size or tile.height != tile_size:
tile = tile.resize((tile_size, tile_size), Image.LANCZOS)
tile.save(os.path.join(path_out, tile_filename), quality=80)
# Store tile data.
keys = ['case_id', 'slide_filename', 'tile_filename', 'top', 'bottom',
'left', 'right', 'mpp', 'mpp_source', 'lvl', 'Test', 'CV']
values = [case_id, slide_filename, tile_filename, h_start, h_end, w_start, w_end,
mpp_target, mpp_source, resolution_level, test, cv]
data.append(dict(zip(keys, values)))
# Make sure the OpenSlide image is closed.
slide.close()
# If there are entries in the data list, make a df and save it
if not data:
df_tmp = None
else:
df_tmp = pd.DataFrame(data)
path_out_df = os.path.join(path_out, slide_filename + '.pkl')
df_tmp.to_pickle(path_out_df)
return df_tmp
if __name__ == '__main__':
# Get arguments
parser = argparse.ArgumentParser()
parser.add_argument('--indir', type=str, default=None, help='path/to/slides')
parser.add_argument('--outdir', type=str, default=None, help='path/to/tiles')
parser.add_argument('--n_jobs', type=int, default=1, help='Number of parallel executions.')
args = parser.parse_args()
path_slides = args.indir
path_tiles = args.outdir
n_jobs = args.n_jobs
# Set paths, get dfs
path_dfs_base = os.path.join(os.path.dirname(os.getcwd()), 'data')
path_df_tile = os.path.join(path_dfs_base, 'df_tile.pkl')
df_meta = pd.read_csv(os.path.join(path_dfs_base, 'df_meta.csv'))
df_tile_coords = pd.read_pickle(os.path.join(path_dfs_base, 'df_tile_coordinates.pkl'))
# Create a dictionary that maps all file names to paths
paths = list()
names = list()
exists = 0
for path in Path(path_slides).rglob('*.svs'):
paths.append(str(path))
names.append(str(path.name))
print('Number of slides found:', len(paths))
dict_lookup = dict(zip(names, paths))
# Get lists for tiling
l_dfs = list()
l_ids = list()
l_paths = list()
l_test = list()
l_cv = list()
print('Collecting tiling data...')
for _, row in tqdm(df_meta.iterrows(), total=len(df_meta)):
case_id = row['case_id']
slide_name = row['slide_name']
test = row['test']
cv = row['CV']
df_tmp = df_tile_coords.loc[df_tile_coords['Case ID']==case_id].reset_index(drop=True)
path_curr = dict_lookup[slide_name]
l_dfs.append(df_tmp)
l_ids.append(case_id)
l_paths.append(path_curr)
l_test.append(test)
l_cv.append(cv)
# Make partial tiling function for 40X
tile_par = partial(tile,
path_out_base=path_tiles,
tile_size=500,
mpp_target=0.252)
print('Tiling WSIs...')
dfs = Parallel(n_jobs=n_jobs)(delayed(tile_par)(df_slide, case_id, path_img, test, cv)
for df_slide, case_id, path_img, test, cv
in tqdm(zip(l_dfs, l_ids, l_paths, l_test, l_cv), total=len(l_dfs)))
# Concat output
df_total = | pd.concat(dfs) | pandas.concat |
import numpy as np
import pandas as pd
from .conversions import (
cube2even_row_offset, cube2odd_row_offset,
cube2even_col_offset, cube2odd_col_offset,
even_row_offset2cube, odd_row_offset2cube,
even_col_offset2cube, odd_col_offset2cube,
cube2cartesian_pointy_top, cube2cartesian_flat_top
)
class HexPoints:
def __init__(self, x=(), y=(), z=(), data=None, orientation='pointy_top'):
if orientation not in ('pointy_top', 'flat_top'):
raise ValueError('orientation must be "pointy_top" or "flat_top"')
self.orientation = orientation
self.points = np.array([x, y, z])
if self.points.ndim == 1:
self.points.shape = (1, 3)
else:
self.points = self.points.T
if not np.isclose(np.sum(self.points), 0):
raise ValueError('Cube coordinates do not add up to 0')
self.data = data
def __add__(self, other):
if self.orientation != other.orientation:
raise ValueError('HexPoints have different orientations')
cube = self.points + other.points
return self.__class__.from_points(cube, orientation=self.orientation)
def __sub__(self, other):
if self.orientation != other.orientation:
raise ValueError('HexPoints have different orientations')
cube = self.points - other.points
return self.__class__.from_points(cube, orientation=self.orientation)
def __mul__(self, number):
if not np.isscalar(number):
raise TypeError('HexPoints coordinates can only be multiplied by scalars')
points = self.points * number
return self.__class__.from_points(points, orientation=self.orientation)
def __rmul__(self, number):
return self.__mul__(number)
def __radd__(self, other):
return self.__add__(other)
@classmethod
def from_points(cls, points, orientation='pointy_top', data=None):
cube = np.array(points)
return cls(
cube[:, 0], cube[:, 1], cube[:, 2],
orientation=orientation,
data=data,
)
@classmethod
def from_even_row_offset(cls, col, row, data=None):
x, y, z = even_row_offset2cube(col, row)
return cls(x, y, z, orientation='pointy_top', data=data)
@classmethod
def from_odd_row_offset(cls, col, row, data=None):
x, y, z = odd_row_offset2cube(col, row)
return cls(x, y, z, orientation='pointy_top', data=data)
@classmethod
def from_even_col_offset(cls, col, row, data=None):
x, y, z = even_col_offset2cube(col, row)
return cls(x, y, z, orientation='flat_top', data=data)
@classmethod
def from_odd_col_offset(cls, col, row, data=None):
x, y, z = odd_col_offset2cube(col, row)
return cls(x, y, z, orientation='flat_top', data=data)
@classmethod
def from_axial(cls, x, z, orientation='pointy_top', data=None):
return cls(x, -x - z, z, orientation=orientation, data=data)
@property
def x(self):
if len(self) > 0:
return self.points[:, 0]
else:
return np.array([])
@property
def y(self):
if len(self) > 0:
return self.points[:, 1]
else:
return np.array([])
@property
def z(self):
if len(self) > 0:
return self.points[:, 2]
else:
return np.array([])
@property
def even_row_offset(self):
if self.orientation == 'flat_top':
raise NotImplemented(
'Even row offset coordinates not valid for flat_top orientation'
)
return cube2even_row_offset(self.x, self.y, self.z)
@property
def odd_row_offset(self):
if self.orientation == 'flat_top':
raise NotImplemented(
'Even row offset coordinates not valid for flat_top orientation'
)
return cube2odd_row_offset(self.x, self.y, self.z)
@property
def even_col_offset(self):
if self.orientation == 'pointy_top':
raise NotImplemented(
'Even row offset coordinates not valid for pointy_top orientation'
)
return cube2even_col_offset(self.x, self.y, self.z)
@property
def odd_col_offset(self):
if self.orientation == 'pointy_top':
raise NotImplemented(
'Even row offset coordinates not valid for pointy_top orientation'
)
return cube2odd_col_offset(self.x, self.y, self.z)
@property
def axial(self):
return self.points[:, 0], self.points[:, 2]
@property
def cartesian(self):
if self.orientation == 'pointy_top':
return cube2cartesian_pointy_top(self.x, self.y, self.z)
return cube2cartesian_flat_top(self.x, self.y, self.z)
def __len__(self):
return self.points.shape[0]
def __repr__(self):
np.set_printoptions(threshold=5)
s = self.__class__.__name__
s += '({}'.format(self.orientation)
if len(self) == 0:
return s + ')'
s += ',\n '
if len(self) > 6:
with pd.option_context('display.max_rows', 6):
s += '\n '.join(str(self.data.reset_index()).splitlines()[:-2])
else:
s += '\n '.join(str(self.data.reset_index()).splitlines())
s += '\n)'
return s
def __getitem__(self, sl):
other = self.__class__([], [], [], orientation=self.orientation)
cube = self.points[sl]
if cube.ndim == 1:
cube.shape = (1, 3)
other.points = cube
other.data = self.data.loc[other._data_index]
return other
def __eq__(self, other):
return np.all(self.points == other.points, axis=1)
@property
def data(self):
return self._data
@data.setter
def data(self, df):
if isinstance(df, pd.DataFrame):
self._data = df.set_index(self._data_index)
else:
df = pd.DataFrame(df, index=self._data_index)
self._data = df
@property
def _data_index(self):
return | pd.MultiIndex.from_arrays([self.x, self.z], names=['x', 'z']) | pandas.MultiIndex.from_arrays |
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVEFR actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if | pd.isnull(value) | pandas.isnull |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/11/23 20:08
Desc: 新浪财经-国内期货-实时数据获取
http://vip.stock.finance.sina.com.cn/quotes_service/view/qihuohangqing.html#titlePos_3
P.S. 注意采集速度, 容易封禁 IP, 如果不能访问请稍后再试
"""
import json
import time
import pandas as pd
import requests
from akshare.futures.cons import (
zh_subscribe_exchange_symbol_url,
zh_match_main_contract_url,
zh_match_main_contract_payload,
)
from akshare.futures.futures_contract_detail import futures_contract_detail
from akshare.utils import demjson
def zh_subscribe_exchange_symbol(exchange: str = "dce") -> dict:
"""
交易所具体的可交易品种
http://vip.stock.finance.sina.com.cn/quotes_service/view/qihuohangqing.html#titlePos_1
:param exchange: choice of {'czce', 'dce', 'shfe', 'cffex'}
:type exchange: str
:return: 交易所具体的可交易品种
:rtype: dict
"""
r = requests.get(zh_subscribe_exchange_symbol_url)
r.encoding = "gbk"
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): data_text.find("};") + 1])
if exchange == "czce":
data_json["czce"].remove("郑州商品交易所")
return pd.DataFrame(data_json["czce"])
if exchange == "dce":
data_json["dce"].remove("大连商品交易所")
return pd.DataFrame(data_json["dce"])
if exchange == "shfe":
data_json["shfe"].remove("上海期货交易所")
return pd.DataFrame(data_json["shfe"])
if exchange == "cffex":
data_json["cffex"].remove("中国金融期货交易所")
return pd.DataFrame(data_json["cffex"])
def match_main_contract(exchange: str = "cffex") -> str:
"""
获取主力合约
:param exchange: choice of {'czce', 'dce', 'shfe', 'cffex'}
:type exchange: str
:return: 获取主力合约的字符串
:rtype: str
"""
subscribe_exchange_list = []
exchange_symbol_list = zh_subscribe_exchange_symbol(exchange).iloc[:, 1].tolist()
for item in exchange_symbol_list:
# item = 'sngz_qh'
zh_match_main_contract_payload.update({"node": item})
res = requests.get(
zh_match_main_contract_url, params=zh_match_main_contract_payload
)
data_json = demjson.decode(res.text)
data_df = pd.DataFrame(data_json)
try:
main_contract = data_df[data_df.iloc[:, 3:].duplicated()]
print(main_contract["symbol"].values[0])
subscribe_exchange_list.append(main_contract["symbol"].values[0])
except:
if len(data_df) == 1:
subscribe_exchange_list.append(data_df["symbol"].values[0])
print(data_df["symbol"].values[0])
else:
print(item, "无主力合约")
continue
print(f"{exchange}主力合约获取成功")
return ",".join(["nf_" + item for item in subscribe_exchange_list])
def futures_zh_spot(
subscribe_list: str = "nf_IF1912,nf_TF1912,nf_IH1912,nf_IC1912",
market: str = "CF",
adjust: bool = False,
) -> pd.DataFrame:
"""
期货的实时行情数据
:param subscribe_list: 行情的字符串组合
:type subscribe_list: str
:param market: CF 为商品期货
:type market: str
:param adjust: True or False
:type adjust: bool
:return: 期货的实时行情数据
:rtype: pandas.DataFrame
"""
url = f"https://hq.sinajs.cn/rn={round(time.time() * 1000)}&list={subscribe_list}"
res = requests.get(url)
data_df = pd.DataFrame(
[
item.strip().split("=")[1].split(",")
for item in res.text.split(";")
if item.strip() != ""
]
)
data_df.iloc[:, 0] = data_df.iloc[:, 0].str.replace('"', "")
data_df.iloc[:, -1] = data_df.iloc[:, -1].str.replace('"', "")
if adjust:
contract_name_list = [item.split("_")[1] for item in subscribe_list.split(",")]
contract_min_list = []
contract_exchange_list = []
for contract_name in contract_name_list:
# print(contract_name)
# contract_name = 'AP2101'
temp_df = futures_contract_detail(contract=contract_name)
exchange_name = temp_df[temp_df["item"] == "上市交易所"]["value"].values[0]
contract_exchange_list.append(exchange_name)
contract_min = temp_df[temp_df["item"] == "最小变动价位"]["value"].values[0]
contract_min_list.append(contract_min)
if market == "CF":
data_df.columns = [
"symbol",
"time",
"open",
"high",
"low",
"last_close",
"bid_price",
"ask_price",
"current_price",
"avg_price",
"last_settle_price",
"buy_vol",
"sell_vol",
"hold",
"volume",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
data_df = data_df[
[
"symbol",
"time",
"open",
"high",
"low",
"current_price",
"bid_price",
"ask_price",
"buy_vol",
"sell_vol",
"hold",
"volume",
"avg_price",
"last_close",
"last_settle_price",
]
]
data_df["exchange"] = contract_exchange_list
data_df["contract"] = contract_name_list
data_df["contract_min_change"] = contract_min_list
return data_df
else:
data_df.columns = [
"open",
"high",
"low",
"current_price",
"volume",
"amount",
"hold",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_" "_",
"time",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"symbol",
]
data_df = data_df[
[
"symbol",
"time",
"open",
"high",
"low",
"current_price",
"hold",
"volume",
"amount",
]
]
data_df["exchange"] = contract_exchange_list
data_df["contract"] = contract_name_list
data_df["contract_min_change"] = contract_min_list
return data_df
else:
if market == "CF":
data_df.columns = [
"symbol",
"time",
"open",
"high",
"low",
"last_close",
"bid_price",
"ask_price",
"current_price",
"avg_price",
"last_settle_price",
"buy_vol",
"sell_vol",
"hold",
"volume",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
data_df = data_df[
[
"symbol",
"time",
"open",
"high",
"low",
"current_price",
"bid_price",
"ask_price",
"buy_vol",
"sell_vol",
"hold",
"volume",
"avg_price",
"last_close",
"last_settle_price",
]
]
return data_df
else:
data_df.columns = [
"open",
"high",
"low",
"current_price",
"volume",
"amount",
"hold",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_" "_",
"time",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"symbol",
]
data_df = data_df[
[
"symbol",
"time",
"open",
"high",
"low",
"current_price",
"hold",
"volume",
"amount",
]
]
return data_df
def futures_zh_minute_sina(symbol: str = "IF2008", period: str = "5") -> pd.DataFrame:
"""
中国各品种期货分钟频率数据
http://vip.stock.finance.sina.com.cn/quotes_service/view/qihuohangqing.html#titlePos_3
:param symbol: 可以通过 match_main_contract(exchange="cffex") 获取, 或者访问网页获取
:type symbol: str
:param period: choice of {"1": "1分钟", "5": "5分钟", "15": "15分钟", "30": "30分钟", "60": "60分钟"}
:type period: str
:return: 指定 symbol 和 period 的数据
:rtype: pandas.DataFrame
"""
url = "https://stock2.finance.sina.com.cn/futures/api/jsonp.php/=/InnerFuturesNewService.getFewMinLine"
params = {
"symbol": symbol,
"type": period,
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split("=(")[1].split(");")[0]))
temp_df.columns = ["datetime", "open", "high", "low", "close", "volume", "hold"]
temp_df['open'] = pd.to_numeric(temp_df['open'])
temp_df['high'] = pd.to_numeric(temp_df['high'])
temp_df['low'] = pd.to_numeric(temp_df['low'])
temp_df['close'] = pd.to_numeric(temp_df['close'])
temp_df['volume'] = pd.to_numeric(temp_df['volume'])
temp_df['hold'] = pd.to_numeric(temp_df['hold'])
return temp_df
def futures_zh_daily_sina(symbol: str = "V2105") -> pd.DataFrame:
"""
中国各品种期货日频率数据
https://finance.sina.com.cn/futures/quotes/V2105.shtml
:param symbol: 可以通过 match_main_contract(exchange="cffex") 获取, 或者访问网页获取
:type symbol: str
:return: 指定 symbol 和 period 的数据
:rtype: pandas.DataFrame
"""
date = "20210412"
url = "https://stock2.finance.sina.com.cn/futures/api/jsonp.php/var%20_V21052021_4_12=/InnerFuturesNewService.getDailyKLine"
params = {
"symbol": symbol,
"type": '_'.join([date[:4], date[4:6], date[6:]]),
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split("=(")[1].split(");")[0]))
temp_df.columns = ["date", "open", "high", "low", "close", "volume", "hold", "settle"]
temp_df['open'] = pd.to_numeric(temp_df['open'])
temp_df['high'] = pd.to_numeric(temp_df['high'])
temp_df['low'] = pd.to_numeric(temp_df['low'])
temp_df['close'] = pd.to_numeric(temp_df['close'])
temp_df['volume'] = pd.to_numeric(temp_df['volume'])
temp_df['hold'] = pd.to_numeric(temp_df['hold'])
temp_df['settle'] = | pd.to_numeric(temp_df['settle']) | pandas.to_numeric |
import os
import re
from datetime import date, datetime
from warnings import warn
import pandas as pd
from dateutil import relativedelta
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
# Directories for data sets
dirname = os.path.dirname(__file__)
nyse_dir = os.path.join(dirname, '../data/nyse/')
nasdaq_dir = os.path.join(dirname, '../data/nasdaq/')
class DataSet:
# Create a calendar of US business days
us_bd = CustomBusinessDay(calendar=USFederalHolidayCalendar())
# Create list of NASDAQ and NYSE years in data set
nasdaq_years = list(map(int, [j for i, j, y in os.walk(nasdaq_dir) if j][0]))
nyse_years = list(map(int, [j for i, j, y in os.walk(nyse_dir) if j][0]))
# Check if data set contains same year directories
assert len(nasdaq_years) == len(nyse_years) and sorted(nasdaq_years) == sorted(
nyse_years), 'Exchange year data directories do not match.'
def __init__(self, months: int = 12, year: int = 2017):
# Set the year
if year and year not in list(set(self.nasdaq_years + self.nyse_years)):
raise ValueError("Year is not in data set.")
else:
self.year = year
print('Preparing data for Y{}'.format(self.year))
# Create lists of days for each exchange
# TODO: There should be a DataFile class for these.
self.nasdaq_day_files = [c for a, b, c in os.walk(nasdaq_dir + '/{}'.format(self.year))][0]
self.nasdaq_days = list(map(int, [re.split('_|\.', d)[1] for d in self.nasdaq_day_files]))
self.nasdaq_dates = [datetime.strptime(str(d), '%Y%m%d') for d in self.nasdaq_days]
self.nyse_day_files = [c for a, b, c in os.walk(nyse_dir + '/{}'.format(self.year))][0]
self.nyse_days = list(map(int, [re.split('_|\.', d)[1] for d in self.nyse_day_files]))
self.nyse_dates = [datetime.strptime(str(d), '%Y%m%d') for d in self.nyse_days]
assert len(self.nasdaq_days) == len(self.nyse_days) and sorted(self.nasdaq_days) == sorted(
self.nyse_days), 'Exchange day sets do not match'
# Count the expected number of trading days based off of pandas US business holiday calendar
self.expected_trading_days = pd.DatetimeIndex(start='{}-01-01'.format(self.year),
end='{}-12-31'.format(self.year), freq=self.us_bd).size
# Get number of trading days
self.num_trading_days = len(list(set(self.nasdaq_days + self.nyse_days)))
if self.expected_trading_days != self.num_trading_days:
warn('Number of days in data set ({}) does not match the number of expected trading days ({}).'.format(
self.num_trading_days, self.expected_trading_days))
# TODO: user should be able to check for n months back in data set, if day n months ago is NOT a business day,
# get the next business day.
# TODO: should also be able to set a date range for data
# assert 0 < months < 24, 'Parameter months must be in range: 0 < months < 24'
# if not is_business_day(get_relative_date(months)):
# pass
# Load data
self.data: pd.DataFrame = self.load_dataframe()
def load_dataframe(self) -> pd.DataFrame:
"""
Creates a main DataFrame from all data sets then transposes the index and columns.
:return: Transposed DataFrame of all CSV data files.
"""
# Read data files into DataFrames
nasdaq_df, nasdaq_vol = self.df_read_data_sets('{}{}/'.format(nasdaq_dir, self.year))
nyse_df, nyse_vol = self.df_read_data_sets('{}{}/'.format(nyse_dir, self.year))
# Concat DataFrames into main result DataFrames
main_df = pd.concat([nasdaq_df, nyse_df], axis=0)
main_df.index = pd.to_datetime(main_df.index)
self.volume = | pd.concat([nasdaq_vol, nyse_vol], axis=0) | pandas.concat |
"""
Results visualisation for the SimpleEnergyModel - MJ2383
"""
#%%
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
#%%
# import sys
# def main(filepath1, filepath2):
#%%
path = os.path.join('SimpleEnergyModel', 'SimpleEnergyModel_Gas', 'data', 'SpecifiedAnnualDemand.csv')
Demand = pd.read_csv(path)
## Demand
D = Demand.loc[Demand.FUEL == 'FEL']
Years = Demand.YEAR.unique()
#%%
# SimpleEnergyModel_Gas
path = os.path.join('SimpleEnergyModel', 'SimpleEnergyModel_Gas', 'results', 'ProductionByTechnologyAnnual.csv')
Production = pd.read_csv(path)
## Production By Technology Annual
ProductionData = {}
for i in Production.TECHNOLOGY.unique():
data = Production.loc[Production.TECHNOLOGY == i]
Region = data.REGION.unique()
Technology = data.TECHNOLOGY.unique()
Fuel = data.FUEL.unique()
if len(data.index)<27:
data=data.set_index('YEAR').reindex(Years).reset_index().fillna(0)
data.loc[data.VALUE == 0, 'REGION'] = Region[0]
data.loc[data.VALUE == 0, 'TECHNOLOGY'] = Technology[0]
data.loc[data.VALUE == 0, 'FUEL'] = Fuel[0]
ProductionData[i] = data
for x in ProductionData:
if x == 'NGCC':
NGCC = ProductionData[x]
if x == 'Backstop':
B = ProductionData[x]
if x == 'SOLPV':
SOLPV = ProductionData[x]
if x == 'WIND':
WIND = ProductionData[x]
plt.plot(D.YEAR, D.VALUE)
plt.xlabel('Years')
plt.ylabel('Demand')
fig1 = plt.stackplot(Years, NGCC.VALUE, B.VALUE, labels=['NGCC', 'Backstop'])
plt.legend(loc='upper left')
NGCC = pd.DataFrame()
B = pd.DataFrame()
SOLPV = | pd.DataFrame() | pandas.DataFrame |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = | Period(freq='A', year=2007) | pandas.tseries.period.Period |
import numpy as np
import pandas as pd
from typing import Dict
from typing import Tuple
from problem import get_train_data
def stormid_dict(X_df: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""
Partitions the storm forecast dataset into separate groups for each storm and
returns the result as a dictionary.
"""
groups = X_df.groupby(['stormid'])
storm_dict = dict()
for stormid, df in groups:
storm_dict[stormid] = df
return storm_dict
def feature_groups(X_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Partitions X_df into three groups by columns:
1) 0-D features
2) 11x11 z, u, v wind reanalysis data
3) 11x11 sst, slp, humidity, and vorticity reanalysis data
"""
feat_cols = X_df.get(['instant_t', 'windspeed', 'latitude', 'longitude','hemisphere','Jday_predictor','initial_max_wind','max_wind_change_12h','dist2land'])
nature_cols = pd.get_dummies(X_df.nature, prefix='nature', drop_first=True)
basin_cols = | pd.get_dummies(X_df.basin, prefix='basin', drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 28 15:43:52 2021
@author: bw98j
"""
import prose as pgx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import numpy as np
import itertools
import glob
import os
from tqdm import tqdm
import scipy.stats
import gtfparse
import itertools
from pylab import *
import collections
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import quantile_transform
import pickle
import re
#plot parameters
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rc('font',family='arial',size=40)
plt.rc('hatch',linewidth = 2.0)
tpm = pd.read_csv('ccle/ccle_tpm_formatted.tsv.gz', sep='\t').drop_duplicates('cell_line')
tmt = | pd.read_csv('ccle/ccle_tmt_formatted_withMissingVals.tsv.gz', sep='\t') | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.