prompt | completion | api
---|---|---
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 22:58:48 2021
@author: laura.gf
"""
import requests
from requests.exceptions import HTTPError
import time
from dateutil.relativedelta import relativedelta
from datetime import datetime
import pandas as pd
import sys
def query_entry_pt(url):
"""This function takes as input a URL entry point and returns the complete JSON response in a REST API
Input:
- url(string): complete url (or entry point) pointing at server
Output:
- jsonResponse(json object): JSON response associated with the query
"""
try:
# Time query
start_time = time.time()
# Using GET command
response = requests.get(url)
total_time = time.time() - start_time
# Raise an HTTPError for 4xx/5xx responses
response.raise_for_status()
# Access JSON content
jsonResponse = response.json()
return [jsonResponse,total_time]
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
def format_json_resp(json_resp,query_time,record_path_field,date,time,output_dir,base_url,entry_pt):
"""
This function takes query result in JSON format and saves a CSV with results
Parameters
----------
json_resp : JSON object
content of query.
query_time : string
time it took for the query to complete.
record_path_field : string
level necessary to flatten JSON.
date : datetime object
date of query.
time : datetime object
time of query.
output_dir : string
path to directory where CSV query results will be stored.
base_url : string
URL pointing at REST API.
entry_pt : string
complete query type.
Returns
-------
None.
"""
df = | pd.json_normalize(json_resp,record_path=record_path_field) | pandas.json_normalize |
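The completion for this sample calls pandas.json_normalize with a record_path. As a minimal, self-contained sketch of that call (the payload and field names below are illustrative, not taken from the queried API), flattening a nested list of records into a DataFrame looks like this:

import pandas as pd

# Illustrative payload; record_path points at the nested list that becomes the rows.
json_resp = {
    "query": "flights",
    "records": [
        {"id": 1, "origin": "YUL", "destination": "YYZ"},
        {"id": 2, "origin": "YVR", "destination": "YYC"},
    ],
}

df = pd.json_normalize(json_resp, record_path="records")
print(df)
#    id origin destination
# 0   1    YUL         YYZ
# 1   2    YVR         YYC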
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual( | frequencies.get_freq_code((1000, 1)) | pandas.tseries.frequencies.get_freq_code |
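The tests above exercise frequencies.to_offset and related internals from an old pandas release. Below is a minimal sketch of the public to_offset entry point against a recent pandas; note that several aliases used in these tests ('T', 'L', 'U', 'N') and helpers such as get_freq_code have since been deprecated or removed, so only stable spellings appear here.

import pandas as pd
from pandas.tseries.frequencies import to_offset

# Frequency strings and Timedeltas both resolve to DateOffset objects.
print(to_offset("2h30min"))                        # <150 * Minutes>
print(to_offset("W"))                              # <Week: weekday=6>, i.e. W-SUN
print(to_offset(pd.Timedelta(days=1, seconds=1)))  # <86401 * Seconds>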
import pandas as pd
import numpy as np
from itertools import chain
import requests
def read_cnv(inputfile):
"""Function to read CNV input file.
input: _somatic_cnv.tsv
output: dataframe"""
def convert_to_int(row):
if row['chr'].lower() in ["x", "y"]:
return row["chr"]
elif row['chr'] == "null":
return row["chr"]
else:
return int(row["chr"])
dataframe = pd.read_csv(inputfile, sep="\t")
dataframe["var_type"] = "cnv"
dataframe.fillna("null", inplace=True)
dataframe["chr"] = dataframe["chr"].str.replace("chr", "")
# convert chromosome number to int.
dataframe["chr"] = dataframe.apply(convert_to_int, axis=1)
return dataframe
def chainer(df_series):
"""Function to return flattened list of splitted values. It is used in parse_cnv(), to repeate the rows for splitted values."""
return list(chain.from_iterable(df_series.str.split(';')))
def split_effect(row):
"""Function to return gene's effect via parsing the effect column of the input dataframe. It is used in parse_cnv() to retrieve the corresponding effect of the gene given in the gene column after ; split."""
effect = row["effect"].split(";")
if row["effect"] == "null":
val = "null"
elif row["gene"] in row["effect"]:
for e in effect:
if row['gene'] in e:
val = e
else:
continue
else:
val = "null"
return val
# room for improvement: make the request calls parallel, they take a long time
def get_hgnc_id(dataframe):
"""Function to retrieve HGNC IDs via HGNC REST API"""
def hgnc_to_str(row):
if isinstance(row["HGNC_ID"], float):
return str(int(row["HGNC_ID"]))
elif isinstance(row["HGNC_ID"], int):
return str(row["HGNC_ID"])
else:
return row["HGNC_ID"]
if "HGNC_ID" in dataframe.columns:
dataframe["HGNC_ID"] = dataframe.apply(hgnc_to_str, axis=1)
else:
for index, row in dataframe.iterrows():
if row["gene"] == "null":
dataframe.at[index, 'HGNC_ID'] = "null"
continue
url = "http://rest.genenames.org//search/symbol/{}".format(
row['gene'])
response = requests.get(
url, headers={'Accept': 'application/json'})
if response.status_code == 200 and response.json()["response"]["numFound"] > 0:
value = response.json()[
"response"]["docs"][0]["hgnc_id"].strip("HGNC: ")
print(value)
dataframe.at[index, 'HGNC_ID'] = value
else:
dataframe.at[index, 'HGNC_ID'] = "null"
return dataframe
# room for improvement: check the column names before processing, they might change.
def parse_cnv(dataframe):
"""Function to process input cnv file. split gene values seperated by ;, repeat rows and reshape the dataframe, get the effect of the splitted gene via parding the effect column, retrieving hgnc ids via hgnc rest api. """
# get repetitions based on the split
lengths = dataframe['gene'].str.split(';').map(len)
# reshape dataframe
reshaped_data = {'size': np.repeat(dataframe['size'], lengths), 'type': np.repeat(dataframe['type'], lengths), 'copy_number': np.repeat(
dataframe['copy_number'], lengths), 'gene': chainer(dataframe['gene']), 'exons': np.repeat(dataframe['exons'], lengths),
'transcript': np.repeat(dataframe['transcript'], lengths), 'chr': np.repeat(dataframe['chr'], lengths),
'start': np.repeat(dataframe['start'], lengths), 'end': np.repeat(dataframe['end'], lengths),
'effect': np.repeat(dataframe['effect'], lengths), 'var_type': np.repeat(dataframe['var_type'], lengths)}
if "HGNC_ID" in dataframe.columns:
reshaped_data['HGNC_ID'] = np.repeat(dataframe['HGNC_ID'], lengths)
reshaped_dataframe = | pd.DataFrame(reshaped_data) | pandas.DataFrame |
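The completion here builds a DataFrame from the reshaped dictionary. A small self-contained sketch of the repeat-and-flatten pattern used in parse_cnv() follows; the toy data is made up, and .to_numpy() is used to sidestep the index alignment that the original code handles by repeating the Series directly.

from itertools import chain
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "chr": [1, 2],
    "gene": ["BRCA1;BRCA2", "TP53"],
    "copy_number": [3, 1],
})

# One output row per ;-separated gene in each input row.
lengths = df["gene"].str.split(";").map(len)
reshaped = pd.DataFrame({
    "chr": np.repeat(df["chr"].to_numpy(), lengths),
    "copy_number": np.repeat(df["copy_number"].to_numpy(), lengths),
    "gene": list(chain.from_iterable(df["gene"].str.split(";"))),
})
print(reshaped)
#    chr  copy_number   gene
# 0    1            3  BRCA1
# 1    1            3  BRCA2
# 2    2            1   TP53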
import pandas as pd
import numpy as np
from xgboost import XGBRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import metrics
import os
import sys
import itertools
from pathlib import Path
import pickle
import logging
import time
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,)
logger = logging.getLogger(__name__)
#model training
def train_and_score(queries,model,dims, **param_grid):
X_train, X_test, y_train, y_test = train_test_split(
queries[:,:queries.shape[1]-1], queries[:,-1], test_size=0.2, random_state=0)
if param_grid:
m = GridSearchCV(model(), cv=3,n_jobs=6,
param_grid= param_grid)
else:
m=model()
m.fit(X_train, y_train)
pred = m.predict(X_test)
rmse = np.sqrt(metrics.mean_squared_error(y_test, pred))
r2 = metrics.r2_score(y_test, pred)
print("RMSE : {}".format(rmse))
print("Fitting : {}".format(r2))
return m, rmse, r2
# XGBoost hyperparameter grid
xgb_parameters = { "learning_rate": 10.0**-np.arange(1,4),
"max_depth" : np.arange(3,10,2),
"n_estimators": [100, 200, 300],
"reg_lambda": 10.0**-np.arange(0,4)
}
file = os.fsencode('input/queries/queries-uniform-5-multi_True-density')
filename = os.fsdecode(file)
f = np.loadtxt(filename ,delimiter=',')
cols = [1,2,3]
print(f.shape)
queries_num = np.linspace(10000, f.shape[0],num=10).astype(int)
print(queries_num)
training_overhead = {}
training_overhead['time'] = []
training_overhead['queries'] = []
training_overhead['hypertuning'] = []
for no in queries_num:
X = f[:no,:]
logger.debug("File : {0}".format(X.shape[0]))
start = time.time()
m,RMSE,R2 = train_and_score(X,XGBRegressor, X.shape[1], **xgb_parameters)
end = time.time()-start
training_overhead['time'].append(end)
training_overhead['queries'].append(no)
training_overhead['hypertuning'].append(True)
for i in range(5):
start = time.time()
m,RMSE,R2 = train_and_score(X,XGBRegressor, X.shape[1])
end = time.time()-start
training_overhead['time'].append(end)
training_overhead['queries'].append(no)
training_overhead['hypertuning'].append(False)
df = | pd.DataFrame(training_overhead) | pandas.DataFrame |
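The completion turns the collected timings into a DataFrame. Below is a minimal, runnable sketch of the same collect-then-tabulate pattern on synthetic data; it substitutes scikit-learn's GradientBoostingRegressor for XGBRegressor purely so the snippet has no xgboost dependency, and all names and sizes are illustrative.

import time
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn import metrics

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 4))
y = X @ np.array([1.0, -2.0, 0.5, 3.0]) + rng.normal(scale=0.1, size=1000)

overhead = {"time": [], "queries": [], "hypertuning": []}
for n in (250, 500, 1000):
    X_train, X_test, y_train, y_test = train_test_split(
        X[:n], y[:n], test_size=0.2, random_state=0)
    start = time.time()
    model = GradientBoostingRegressor().fit(X_train, y_train)
    overhead["time"].append(time.time() - start)   # seconds per fit
    overhead["queries"].append(n)                  # training-set size
    overhead["hypertuning"].append(False)          # no grid search in this sketch
    rmse = np.sqrt(metrics.mean_squared_error(y_test, model.predict(X_test)))
    print("n={} RMSE={:.3f}".format(n, rmse))

df = pd.DataFrame(overhead)   # one row per timed run
print(df)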
## Packages.
import pandas, os, tqdm
## Group of data tables.
group = []
for mode in ['train', 'test']:
if(mode=='train'):
## Load table.
table = pandas.read_csv("../DATA/BMSMT/TRAIN/CSV/LABEL.csv")
table['mode'] = 'train'
## Information.
folder = "../DATA/BMSMT/TRAIN/PNG/"
table['image'] = [folder + i[0] + '/' + i[1] + '/' + i[2] + '/' + i + '.png' for i in table['image_id']]
table['length'] = [len(i) for i in table['InChI']]
## Append to group.
group += [table]
pass
if(mode=='test'):
## Load table.
table = | pandas.read_csv("../DATA/BMSMT/TEST/CSV/LABEL.csv") | pandas.read_csv |
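The completion reads the test-split label CSV. A tiny self-contained sketch of the same load-then-derive-columns step follows; the CSV content and sharded folder layout are invented for illustration, and the data is read from an in-memory buffer rather than disk.

import io
import pandas as pd

csv_text = "image_id,InChI\nabc123def,InChI=1S/CH4/h1H4\n"
table = pd.read_csv(io.StringIO(csv_text))
table["mode"] = "train"

# Images are sharded by the first three characters of the id, as in the loop above.
folder = "../DATA/BMSMT/TRAIN/PNG/"
table["image"] = [folder + i[0] + "/" + i[1] + "/" + i[2] + "/" + i + ".png"
                  for i in table["image_id"]]
table["length"] = [len(i) for i in table["InChI"]]
print(table[["image", "length"]])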
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will group by the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as they are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have round-tripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
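# --- Illustrative example (added for clarity; not part of the original module) ---
# How BinGrouper's `bins` and `binlabels` partition data: bins[i] is the exclusive
# end offset of group i and binlabels[i] is its label, so get_iterator yields
# data[start:edge] slices. This is internal machinery, so the exact behavior can
# vary across pandas versions; treat this as a hypothetical, minimal usage only.
def _demo_bin_grouper():
    values = Series(np.arange(6))
    bins = np.array([2, 4, 6])        # groups end at offsets 2, 4 and 6
    labels = Index(['a', 'b', 'c'])   # one label per bin
    grouper = BinGrouper(bins, labels)
    return [(name, grp.tolist()) for name, grp in grouper.get_iterator(values)]
    # -> [('a', [0, 1]), ('b', [2, 3]), ('c', [4, 5])]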
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
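# --- Illustrative example (added for clarity; not part of the original module) ---
# What Grouping ultimately derives from a plain array-like key: integer codes
# (`labels`) plus the unique group values (`group_index`), i.e. a sorted
# factorization. Uses the same `algos` module already imported by this file.
def _demo_grouping_labels():
    key = np.array(['b', 'a', 'b', 'c', 'a'])
    labels, uniques = algos.factorize(key, sort=True)
    # labels  -> array([1, 0, 1, 2, 0])
    # uniques -> ['a', 'b', 'c']; wrapped in an Index it becomes group_index
    return labels, Index(uniques)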
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
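# --- Illustrative example (added for clarity; not part of the original module) ---
# The key forms that _get_grouper resolves all originate from the public groupby
# API; a few representative calls (column name, explicit label array, function of
# the index) that end up in this routine:
def _demo_grouper_inputs():
    df = DataFrame({'key': ['a', 'b', 'a', 'b'], 'val': [1, 2, 3, 4]})
    by_column = df.groupby('key').sum()                  # key names a column
    by_array = df.groupby(np.array([0, 0, 1, 1])).sum()  # explicit label array
    by_func = df.groupby(lambda i: i % 2).sum()          # function of the index
    return by_column, by_array, by_func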
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
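# --- Illustrative example (added for clarity; not part of the original module) ---
# _convert_grouper normalizes the accepted grouper types: a dict becomes its
# .get method, and a Series whose index differs from the grouping axis is
# reindexed to that axis before its values are used. A hypothetical example:
def _demo_convert_grouper():
    axis = Index(['x', 'y', 'z'])
    mapping = {'x': 1, 'y': 2, 'z': 1}
    as_func = _convert_grouper(axis, mapping)    # -> mapping.get
    ser = Series([10, 20], index=['z', 'x'])
    as_values = _convert_grouper(axis, ser)      # -> array([20., nan, 10.])
    return as_func, as_values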
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply an aggregation function or functions to groups, most likely yielding
a Series, but in some cases a DataFrame, depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
"""
Todo:
* Implement correct cfgstrs based on algorithm input
for cached computations.
* Go pandas all the way
Issues:
* errors when there is a word without any database vectors.
currently a weight of zero is hacked in
"""
from __future__ import absolute_import, division, print_function
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 7)
pd.set_option('display.max_columns', 7)
pd.set_option('display.notebook_repr_html', True)
VEC_DIM = 128
VEC_COLUMNS = pd.Int64Index(range(VEC_DIM), name='vec')
def pandasify_dict1d(dict_, keys, val_name, series_name):
""" Turns dict into heirarchy of series """
key2_series = pd.Series(
{key: pd.Series(dict_[key], name=val_name,)
for key in keys},
index=keys, name=series_name)
return key2_series
def pandasify_dict2d(dict_, keys, key2_index, columns, series_name):
""" Turns dict into heirarchy of dataframes """
key2_df = pd.Series(
{key: pd.DataFrame(dict_[key], index=key2_index[key], columns=columns,)
for key in keys},
index=keys, name=series_name)
return key2_df
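# --- Illustrative example (added for clarity; not part of the original module) ---
# A minimal sketch of the "hierarchy" the two pandasify helpers build: an outer
# Series keyed by annotation id whose elements are per-annotation DataFrames.
# The ids, index and vectors below are made up purely for illustration.
def _demo_pandasify_dict2d():
    dict_ = {1: np.array([[0, 1], [2, 3]]), 2: np.array([[4, 5]])}
    keys = pd.Index([1, 2], name='aid')
    key2_index = {1: pd.Index([0, 1], name='fx'), 2: pd.Index([0], name='fx')}
    columns = pd.Int64Index(range(2), name='vec')
    aid2_vecs = pandasify_dict2d(dict_, keys, key2_index, columns, 'aid2_vecs')
    return aid2_vecs  # aid2_vecs[1] is a 2x2 DataFrame indexed by fx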
def make_annot_df(ibs):
"""
Creates a pandas DataFrame using an ibeis controller
>>> from ibeis.model.hots.smk.smk import * # NOQA
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> annots_df = make_annot_df(ibs)
"""
aid_list = ibs.get_valid_aids()
kpts_list = ibs.get_annot_kpts(aid_list)
vecs_list = ibs.get_annot_desc(aid_list)
aid_series = pd.Series(aid_list, name='aid')
# TODO: this could be more pandas
kpts_df = pd.DataFrame(kpts_list, index=aid_series, columns=['kpts'])
import pandas as pd
import pickle
from sklearn.cluster import KMeans
import math
latlong_df = pd.read_csv("./latlongfinal.csv")
features = latlong_df.iloc[:, [2,3]]
new_features = pd.DataFrame()
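# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The imports above (KMeans, pickle) suggest the selected lat/long columns are
# meant to be clustered and the fitted model persisted. A hypothetical helper
# showing that step; the cluster count and output filename are assumptions.
def cluster_and_save(features, n_clusters=8, path="kmeans_model.pkl"):
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(features)
    with open(path, "wb") as f:
        pickle.dump(kmeans, f)
    return kmeans.labels_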
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"]))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(["num", "col"], inplace=True)
pmidx.rename(["num", "col"], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename("number"))
self.assertRaises(ValueError, lambda: kmidx.rename(["number"]))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
kidx = ks.from_pandas(pidx)
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegex(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegex(KeyError, "Requested level (hi)*"):
kidx.unique(level="hi")
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_drop_duplicates(self):
pidx = pd.Index([4, 2, 4, 1, 4, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values())
self.assert_eq(
(kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values()
)
def test_dropna(self):
pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.dropna(), pidx.dropna())
self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna())
def test_index_symmetric_difference(self):
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([2, 3, 4, 5])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(
kidx1.symmetric_difference(kidx2).sort_values(),
pidx1.symmetric_difference(pidx2).sort_values(),
)
self.assert_eq(
(kidx1 + 1).symmetric_difference(kidx2).sort_values(),
(pidx1 + 1).symmetric_difference(pidx2).sort_values(),
)
pmidx1 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pmidx2 = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(
kmidx1.symmetric_difference(kmidx2).sort_values(),
pmidx1.symmetric_difference(pmidx2).sort_values(),
)
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
midx_ = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()),
)
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)()
# Index properties
missing_properties = inspect.getmembers(
MissingPandasLikeIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(
MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, "ks", "ks", None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [
[list("abc"), list("edf")],
[list("aac"), list("edf")],
[list("aac"), list("eef")],
[[1, 4, 4], [4, 6, 6]],
]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"):
kdf.set_index(["a", "b"]).index.any()
with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"):
kdf.set_index(["a", "b"]).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(["a", "b", "c"]))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 1)
def test_multiindex_nlevel(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=[list("abc"), list("def")])
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 2)
def test_multiindex_from_arrays(self):
arrays = [["a", "a", "b", "b"], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays)
kidx = ks.MultiIndex.from_arrays(arrays)
self.assert_eq(pidx, kidx)
def test_multiindex_swaplevel(self):
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", "number"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
self.assert_eq(pidx.swaplevel("word", 1), kidx.swaplevel("word", 1))
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(-3, "word")
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, 2)
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, -3)
with self.assertRaisesRegex(KeyError, "Level work not found"):
kidx.swaplevel(0, "work")
def test_multiindex_droplevel(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2)], names=["level1", "level2", "level3"]
)
kidx = ks.from_pandas(pidx)
with self.assertRaisesRegex(IndexError, "Too many levels: Index has only 3 levels, not 5"):
kidx.droplevel(4)
with self.assertRaisesRegex(KeyError, "Level level4 not found"):
kidx.droplevel("level4")
with self.assertRaisesRegex(KeyError, "Level.*level3.*level4.*not found"):
kidx.droplevel([("level3", "level4")])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 4 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 1, 2])
self.assert_eq(pidx.droplevel(0), kidx.droplevel(0))
self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1]))
self.assert_eq(pidx.droplevel([0, "level2"]), kidx.droplevel([0, "level2"]))
def test_index_fillna(self):
pidx = pd.Index([1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.fillna(0), kidx.fillna(0))
self.assert_eq(pidx.rename("name").fillna(0), kidx.rename("name").fillna(0))
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kidx.fillna([1, 2])
def test_index_drop(self):
pidx = pd.Index([1, 2, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop(1), kidx.drop(1))
self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))
def test_multiindex_drop(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z")], names=["level1", "level2"]
)
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop("a"), kidx.drop("a"))
self.assert_eq(pidx.drop(["a", "b"]), kidx.drop(["a", "b"]))
self.assert_eq(pidx.drop(["x", "y"], level=1), kidx.drop(["x", "y"], level=1))
self.assert_eq(pidx.drop(["x", "y"], level="level2"), kidx.drop(["x", "y"], level="level2"))
pidx.names = ["lv1", "lv2"]
kidx.names = ["lv1", "lv2"]
self.assert_eq(pidx.drop(["x", "y"], level="lv2"), kidx.drop(["x", "y"], level="lv2"))
self.assertRaises(IndexError, lambda: kidx.drop(["a", "b"], level=2))
self.assertRaises(KeyError, lambda: kidx.drop(["a", "b"], level="level"))
kidx.names = ["lv", "lv"]
self.assertRaises(ValueError, lambda: kidx.drop(["x", "y"], level="lv"))
def test_sort_values(self):
pidx = pd.Index([-10, -100, 200, 100])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx.name = "koalas"
kidx.name = "koalas"
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx.names = ["hello", "koalas", "goodbye"]
kidx.names = ["hello", "koalas", "goodbye"]
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
def test_index_drop_duplicates(self):
pidx = pd.Index([1, 1, 2])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
pidx = pd.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=["level1", "level2"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
def test_index_sort(self):
idx = ks.Index([1, 2, 3, 4, 5])
midx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
idx.sort()
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
midx.sort()
def test_multiindex_isna(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isna()
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isnull()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notna()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_index_nunique(self):
pidx = pd.Index([1, 1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.nunique(), kidx.nunique())
self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True))
def test_multiindex_nunique(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_multiindex_rename(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.rename(list("ABC"))
kidx = kidx.rename(list("ABC"))
self.assert_eq(pidx, kidx)
pidx = pidx.rename(["my", "name", "is"])
kidx = kidx.rename(["my", "name", "is"])
self.assert_eq(pidx, kidx)
def test_multiindex_set_names(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.set_names(["set", "new", "names"])
kidx = kidx.set_names(["set", "new", "names"])
self.assert_eq(pidx, kidx)
pidx.set_names(["set", "new", "names"], inplace=True)
kidx.set_names(["set", "new", "names"], inplace=True)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("first", level=0)
kidx = kidx.set_names("first", level=0)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("second", level=1)
kidx = kidx.set_names("second", level=1)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("third", level=2)
kidx = kidx.set_names("third", level=2)
self.assert_eq(pidx, kidx)
pidx.set_names("first", level=0, inplace=True)
kidx.set_names("first", level=0, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("second", level=1, inplace=True)
kidx.set_names("second", level=1, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("third", level=2, inplace=True)
kidx.set_names("third", level=2, inplace=True)
self.assert_eq(pidx, kidx)
def test_multiindex_from_tuples(self):
tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")]
pidx = pd.MultiIndex.from_tuples(tuples)
kidx = ks.MultiIndex.from_tuples(tuples)
self.assert_eq(pidx, kidx)
def test_multiindex_from_product(self):
iterables = [[0, 1, 2], ["green", "purple"]]
pidx = pd.MultiIndex.from_product(iterables)
kidx = ks.MultiIndex.from_product(iterables)
self.assert_eq(pidx, kidx)
def test_multiindex_tuple_column_name(self):
column_labels = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_labels)
pdf.set_index(("a", "x"), append=True, inplace=True)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf, kdf)
def test_len(self):
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(len(pidx), len(kidx))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(len(pidx), len(kidx))
def test_delete(self):
pidx = pd.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
kidx = ks.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
self.assert_eq(pidx.delete(5).sort_values(), kidx.delete(5).sort_values())
self.assert_eq(pidx.delete(-5).sort_values(), kidx.delete(-5).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
with self.assertRaisesRegex(IndexError, "index 10 is out of bounds for axis 0 with size 9"):
kidx.delete(10)
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(pidx.delete(1).sort_values(), kidx.delete(1).sort_values())
self.assert_eq(pidx.delete(-1).sort_values(), kidx.delete(-1).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
def test_append(self):
# Index
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.append(pidx), kidx.append(kidx))
# Index with name
pidx1 = pd.Index(range(10000), name="a")
pidx2 = pd.Index(range(10000), name="b")
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]}, index=["x", "y", "z"])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index("a").index
pidx2 = pdf2.set_index("d").index
kidx1 = kdf1.set_index("a").index
kidx2 = kdf2.set_index("d").index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame with MultiIndex columns
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]})
pdf1.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
pdf2.columns = pd.MultiIndex.from_tuples([("a", "x"), ("d", "y")])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index(("a", "x")).index
pidx2 = pdf2.set_index(("d", "y")).index
kidx1 = kdf1.set_index(("a", "x")).index
kidx2 = kdf2.set_index(("d", "y")).index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# MultiIndex
pmidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx))
# MultiIndex with names
pmidx1 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["x", "y", "z"]
)
pmidx2 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["p", "q", "r"]
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(pmidx1.append(pmidx2), kmidx1.append(kmidx2))
self.assert_eq(pmidx2.append(pmidx1), kmidx2.append(kmidx1))
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
# Index & MultiIndex currently is not supported
expected_error_message = r"append\(\) between Index & MultiIndex currently is not supported"
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kidx.append(kmidx)
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kmidx.append(kidx)
def test_argmin(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmin(), kidx.argmin())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmin' not allowed for this dtype"
):
kidx.argmin()
def test_argmax(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmax(), kidx.argmax())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmax' not allowed for this dtype"
):
kidx.argmax()
def test_monotonic(self):
# test monotonic_increasing & monotonic_decreasing for MultiIndex.
# Since the behavior for null values was changed in pandas >= 1.0.0,
# several cases are tested differently.
datas = []
# increasing / decreasing ordered each index level with string
datas.append([("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
datas.append([("w", "d"), ("x", "c"), ("y", "b"), ("z", "a")])
datas.append([("z", "a"), ("y", "b"), ("x", "c"), ("w", "d")])
datas.append([("z", "d"), ("y", "c"), ("x", "b"), ("w", "a")])
# mixed order each index level with string
datas.append([("z", "a"), ("x", "b"), ("y", "c"), ("w", "d")])
datas.append([("z", "a"), ("y", "c"), ("x", "b"), ("w", "d")])
# increasing / decreasing ordered each index level with integer
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 500), (2, 400), (3, 300), (4, 200), (5, 100)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 500), (4, 400), (3, 300), (2, 200), (1, 100)])
# mixed order each index level with integer
datas.append([(1, 500), (3, 400), (2, 300), (4, 200), (5, 100)])
datas.append([(1, 100), (2, 300), (3, 200), (4, 400), (5, 500)])
# integer / negative mixed tests
datas.append([("a", -500), ("b", -400), ("c", -300), ("d", -200), ("e", -100)])
datas.append([("e", -500), ("d", -400), ("c", -300), ("b", -200), ("a", -100)])
datas.append([(-5, "a"), (-4, "b"), (-3, "c"), (-2, "d"), (-1, "e")])
datas.append([(-5, "e"), (-4, "d"), (-3, "c"), (-2, "b"), (-1, "a")])
datas.append([(-5, "e"), (-3, "d"), (-2, "c"), (-4, "b"), (-1, "a")])
datas.append([(-5, "e"), (-4, "c"), (-3, "b"), (-2, "d"), (-1, "a")])
# None type tests (None type is treated as the smallest value)
datas.append([(1, 100), (2, 200), (None, 300), (4, 400), (5, 500)])
datas.append([(5, None), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, None), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, None)])
datas.append([(1, 100), (2, 200), (None, None), (4, 400), (5, 500)])
datas.append([(-5, None), (-4, None), (-3, None), (-2, None), (-1, None)])
datas.append([(None, "e"), (None, "c"), (None, "b"), (None, "d"), (None, "a")])
datas.append([(None, None), (None, None), (None, None), (None, None), (None, None)])
# duplicated index value tests
datas.append([("x", "d"), ("y", "c"), ("y", "b"), ("z", "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", None), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", "b"), (None, "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), (None, "a")])
# more depth tests
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", "q"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "q"), ("y", "c", "p"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", None), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", None), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.is_monotonic_increasing, pmidx.is_monotonic_increasing)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
# The data below show different results depending on the pandas version,
# because the behavior of handling null values changed in pandas >= 1.0.0.
datas = []
datas.append([(None, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, 500)])
datas.append([(None, None), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, None)])
datas.append([("x", "d"), ("y", None), ("y", "c"), ("z", "a")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", "q"), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
expected_increasing_result = pmidx.is_monotonic_increasing
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
expected_increasing_result = not expected_increasing_result
self.assert_eq(kmidx.is_monotonic_increasing, expected_increasing_result)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
def test_difference(self):
# Index
kidx1 = ks.Index([1, 2, 3, 4], name="koalas")
kidx2 = ks.Index([3, 4, 5, 6], name="koalas")
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference([3, 4, 5, 6]).sort_values(),
pidx1.difference([3, 4, 5, 6]).sort_values(),
)
self.assert_eq(
kidx1.difference((3, 4, 5, 6)).sort_values(),
pidx1.difference((3, 4, 5, 6)).sort_values(),
)
self.assert_eq(
kidx1.difference({3, 4, 5, 6}).sort_values(),
pidx1.difference({3, 4, 5, 6}).sort_values(),
)
self.assert_eq(
kidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
pidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
)
# Exceptions for Index
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference("1234")
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(1234)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(12.34)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(None)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(np.nan)
with self.assertRaisesRegex(
ValueError, "The 'sort' keyword only takes the values of None or True; 1 was passed."
):
kidx1.difference(kidx2, sort=1)
# MultiIndex
kidx1 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["hello", "koalas", "world"]
)
kidx2 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "z", 2), ("k", "z", 3)], names=["hello", "koalas", "world"]
)
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference({("a", "x", 1)}).sort_values(),
pidx1.difference({("a", "x", 1)}).sort_values(),
)
self.assert_eq(
kidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
pidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
)
# Exceptions for MultiIndex
with self.assertRaisesRegex(TypeError, "other must be a MultiIndex or a list of tuples"):
kidx1.difference(["b", "z", "2"])
def test_repeat(self):
pidx = pd.Index(["a", "b", "c"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.repeat(3).sort_values(), pidx.repeat(3).sort_values())
self.assert_eq(kidx.repeat(0).sort_values(), pidx.repeat(0).sort_values())
self.assert_eq((kidx + "x").repeat(3).sort_values(), (pidx + "x").repeat(3).sort_values())
self.assertRaises(ValueError, lambda: kidx.repeat(-1))
self.assertRaises(ValueError, lambda: kidx.repeat("abc"))
pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.repeat(3).sort_values(), pmidx.repeat(3).sort_values())
self.assert_eq(kmidx.repeat(0).sort_values(), pmidx.repeat(0).sort_values())
self.assertRaises(ValueError, lambda: kmidx.repeat(-1))
self.assertRaises(ValueError, lambda: kmidx.repeat("abc"))
def test_unique(self):
pidx = pd.Index(["a", "b", "a"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())
self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())
pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "a")])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())
self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())
def test_asof(self):
# Increasing values
pidx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01"))
self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02"))
self.assert_eq(repr(kidx.asof("1999-01-02")), repr(pidx.asof("1999-01-02")))
# Decreasing values
pidx = pd.Index(["2014-01-03", "2014-01-02", "2013-12-31"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01"))
self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02"))
self.assert_eq(kidx.asof("1999-01-02"), pidx.asof("1999-01-02"))
self.assert_eq(repr(kidx.asof("2015-01-02")), repr(pidx.asof("2015-01-02")))
# Neither increasing nor decreasing (ValueError)
kidx = ks.Index(["2013-12-31", "2015-01-02", "2014-01-03"])
self.assertRaises(ValueError, lambda: kidx.asof("2013-12-31"))
kmidx = ks.MultiIndex.from_tuples([("a", "a"), ("a", "b"), ("a", "c")])
self.assertRaises(NotImplementedError, lambda: kmidx.asof(("a", "b")))
def test_union(self):
# Index
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([3, 4, 5, 6])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2))
self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1))
self.assert_eq(
kidx1.union([3, 4, 5, 6]), pidx1.union([3, 4, 5, 6]),
)
self.assert_eq(
kidx2.union([1, 2, 3, 4]), pidx2.union([1, 2, 3, 4]),
)
self.assert_eq(
kidx1.union(ks.Series([3, 4, 5, 6])), pidx1.union(pd.Series([3, 4, 5, 6])),
)
self.assert_eq(
kidx2.union(ks.Series([1, 2, 3, 4])), pidx2.union(pd.Series([1, 2, 3, 4])),
)
# Testing if the result is correct after sort=False.
# The `sort` argument is added in pandas 0.24.
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
self.assert_eq(
kidx1.union(kidx2, sort=False).sort_values(),
pidx1.union(pidx2, sort=False).sort_values(),
)
self.assert_eq(
kidx2.union(kidx1, sort=False).sort_values(),
pidx2.union(pidx1, sort=False).sort_values(),
)
self.assert_eq(
kidx1.union([3, 4, 5, 6], sort=False).sort_values(),
pidx1.union([3, 4, 5, 6], sort=False).sort_values(),
)
self.assert_eq(
kidx2.union([1, 2, 3, 4], sort=False).sort_values(),
pidx2.union([1, 2, 3, 4], sort=False).sort_values(),
)
self.assert_eq(
kidx1.union(ks.Series([3, 4, 5, 6]), sort=False).sort_values(),
pidx1.union(pd.Series([3, 4, 5, 6]), sort=False).sort_values(),
)
self.assert_eq(
kidx2.union(ks.Series([1, 2, 3, 4]), sort=False).sort_values(),
pidx2.union(pd.Series([1, 2, 3, 4]), sort=False).sort_values(),
)
# Duplicated values for Index is supported in pandas >= 1.0.0
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
pidx1 = pd.Index([1, 2, 3, 4, 3, 4, 3, 4])
pidx2 = pd.Index([3, 4, 3, 4, 5, 6])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2))
self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1))
self.assert_eq(
kidx1.union([3, 4, 3, 3, 5, 6]), pidx1.union([3, 4, 3, 4, 5, 6]),
)
self.assert_eq(
kidx2.union([1, 2, 3, 4, 3, 4, 3, 4]), pidx2.union([1, 2, 3, 4, 3, 4, 3, 4]),
)
self.assert_eq(
kidx1.union(ks.Series([3, 4, 3, 3, 5, 6])),
pidx1.union(pd.Series([3, 4, 3, 4, 5, 6])),
)
self.assert_eq(
kidx2.union(ks.Series([1, 2, 3, 4, 3, 4, 3, 4])),
pidx2.union(pd.Series([1, 2, 3, 4, 3, 4, 3, 4])),
)
import os
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
from plotly import express as px
import pandas as pd
from layout import layout_1, layout_2, navbar, footer
from stockdata import stock_df, make_human_readable
# CSS stylesheets
external_stylesheets = [
dbc.themes.BOOTSTRAP,
"https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css",
]
app = dash.Dash(
__name__,
title="Py-Dash",
external_stylesheets=external_stylesheets,
)
server = app.server
# Main layout of the app
app.layout = html.Div(className="", children=[navbar, layout_1, layout_2, footer])
# Callback function for bootstrap Modal
@app.callback(
Output("modal", "is_open"),
[Input("open", "n_clicks"), Input("close", "n_clicks")],
[State("modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
# Candlestick chart for single symbol
@app.callback(
Output("graph1", "figure"),
[Input("ticker", "value"), Input("toggle-rangeslider", "value")],
)
def display_candlestick(ticker, value):
df = stock_df[stock_df.index == ticker]
fig = go.Figure(
go.Candlestick(
x=df["Date"],
open=df["Open Price"],
high=df["High Price"],
low=df["Low Price"],
close=df["Close Price"],
)
)
fig.update_layout(
template="plotly_dark",
title=ticker,
yaxis_title="Price (INR)",
xaxis_title="Date",
xaxis_rangeslider_visible="slider" in value,
)
fig.layout.autosize = True
return fig
# Line chart for multiple selected symbols
@app.callback(
Output("line_chart", "figure"),
[Input("multi_tickers", "value"), Input("toggle-rangeslider2", "value")],
)
def display_line_chart(tickers, value):
fig = go.Figure()
# List comprehension to create one scatter trace per selected ticker;
# `tickers` is a list (it may hold a single symbol), so one line is
# added to the figure for each selected symbol.
traces = [
go.Scatter(
x=stock_df["Date"],
y=stock_df[stock_df.index == tic]["Close Price"],
mode="lines",
name=tic,
)
for tic in tickers
]
fig.add_traces(traces)
fig.update_layout(
template="plotly_dark",
title=", ".join(tickers),
yaxis_title="Price (INR)",
xaxis_title="Date",
xaxis_rangeslider_visible="slider" in value,
# legend=dict(x=3, y=2),
)
return fig
# bar graph for Total traded quantity vs Deliverable quantity
@app.callback(
Output("bar_graph", "figure"),
[
Input("bar_ticker", "value"),
Input("bar_date_picker", "start_date"),
Input("bar_date_picker", "end_date"),
],
)
def display_bar_graph(bar_ticker, start_date, end_date):
df = stock_df[
(stock_df.index == bar_ticker)
& (stock_df["Pd_date"] >= pd.to_datetime(start_date))
& (stock_df["Pd_date"] <= pd.to_datetime(end_date))
]
fig = go.Figure(
[
go.Bar(
name="Total",
x=df["Date"],
y=df["Total Traded Quantity"],
text=df["Total Traded Quantity"],
textposition="auto",
# orientation="h",
),
go.Bar(
name="Deliverable",
x=df["Date"],
y=df["Deliverable Qty"],
text=df["Deliverable Qty"],
textposition="outside",
hovertext=df["% Dly Qt to Traded Qty"],
),
]
)
fig.update_layout(
template="plotly_dark",
title="Total traded Qty vs Deliverable Qty",
barmode="group",
bargap=0.15,
# uniformtext_minsize=6,
uniformtext_mode="hide",
xaxis=dict(title="Date"),
yaxis=dict(title="Volume"),
)
return fig
# Pie chart for turnover data
@app.callback(
Output("pie_chart", "figure"),
[Input("pie_tickers", "value"), Input("pie_date_picker_single", "date")],
)
def display_pie_chart(pie_tickers, date):
values = [
float(
stock_df[
(stock_df.index == tic) & (stock_df["Pd_date"] == | pd.to_datetime(date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 13 17:52:00 2021
@author: SimenLab
"""
import pandas as pd
def Data_getter(file_location):
"""A function which gets and prepares data from CSV files, as well as
returning some additional params like the number of ROIs and their
corresponding labels.
Parameters
---------
File:
The directory of a CSV .txt file containing the data, with each ROI
represented as individual columns in the file.
Returns
-------
Stimulus DataFrame, stimulus array (as an alternative), number of
ROIs, and their labels.
"""
file = file_location
avgs_data = pd.read_csv((file), sep="\t", header=None, engine="python")
##Label data
ROI_num = avgs_data.shape[1]
# Consider making data loading its own function?
"Can put optional manipulations to data here"
# pd.read_csv((file), sep="\t", header=None) #, names = labels)
averages_dataframe = avgs_data
averages_array = pd.DataFrame.to_numpy(averages_dataframe)
return averages_dataframe, averages_array, ROI_num
class Data:
def Data_getter(self):
"""A function which gets and prepares data from CSV files, as well as
returning some additional params like the number of ROIs and their
corresponding labels.
Parameters
---------
File:
The directory of a CSV .txt file containing the data, with each ROI
represented as individual columns in the file.
Returns
-------
Stimulus DataFrame, stimulus array (as an alternative), number of
ROIs, and their labels.
"""
file = self.file_location
avgs_data = pd.read_csv((file), sep="\t", header=None, engine="python")
##Label data
ROI_num = avgs_data.shape[1]
# Consider making data loading its own function?
"Can put optional manipulations to data here"
# pd.read_csv((file), sep="\t", header=None) #, names = labels)
averages_dataframe = avgs_data
averages_array = pd.DataFrame.to_numpy(averages_dataframe)
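# --- Hedged illustration (not part of the original module) ---
# Sketch of how Data_getter above is expected to be used, assuming a
# tab-separated text file in which each column is one ROI trace. The file
# name is a placeholder for the example.
import pandas as pd

demo_path = "roi_averages.txt"  # hypothetical input file
frame = pd.read_csv(demo_path, sep="\t", header=None, engine="python")
array = frame.to_numpy()        # same conversion as above
n_rois = frame.shape[1]
print(n_rois, array.shape)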
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 22:15:22 2018
@author: tknapen https://github.com/tknapen/hedfpy/blob/master/hedfpy/EyeSignalOperator.py
- Wrapper sacc detection
- sacc detection algorithm
- interpolate gaze function (for pl)
"""
import numpy as np
from scipy.interpolate import PchipInterpolator
import pandas as pd
import numpy.linalg as LA
from functions.et_helper import append_eventtype_to_sample
import functions.et_make_df as make_df
from matplotlib import pyplot as plt
import logging
#%% WRAPPER TO DETECT SACCADES (IN THE CASE OF PL SAMPLES ARE INTERPOLATED FIRST)
def detect_saccades_engbert_mergenthaler(etsamples,etevents=None,et = None,engbert_lambda=5):
# Input: etsamples
# fs: sampling frequency
# Output: saccades (df) with expanded / raw
# amplitude, duration, start_time, end_time, peak_velocity
# get a logger
logger = logging.getLogger(__name__)
# if you specify a sampling frequency, the samples get interpolated
# to have regular sampled data in order to apply the saccade detection algorithm
etsamples = etsamples.copy()
logger.debug('eyetracker: %s',et)
if etevents is not None:
logger.debug('Setting Eyeblink Data to 0')
etsamples = append_eventtype_to_sample(etsamples,etevents,eventtype='blink')
etsamples.loc[etsamples.type=='blink',['gx','gy']] = np.nan
if 'outside' in etsamples:
logger.debug('removing bad-samples for saccade detection')
etsamples.loc[etsamples.outside==True,['gx','gy']] = np.nan
# for pl the gaze needs to be interpolated first
if et == 'pl':
fs = 240
interpgaze = interpolate_gaze(etsamples, fs=fs)
elif et == 'el':
# Eyelink is already interpolated
interpgaze = etsamples
if np.nansum(etsamples.type == 'blink') > 0:
interpgaze['is_blink'] = etsamples.type=='blink'
else:
interpgaze['is_blink'] = 0
if np.isclose(etsamples.iloc[1:3].smpl_time.agg(np.diff),0.002):
fs = 500
else:
# for 5 subjects we have a sampling rate of only 250Hz
fs = 250
# apply the saccade detection algorithm
saccades = apply_engbert_mergenthaler(xy_data = interpgaze[['gx','gy']],is_blink = interpgaze['is_blink'], vel_data = None,sample_rate=fs,l = engbert_lambda)
#sacsave = saccades.copy()
#saccades = sacsave
# convert samples of data back to sample time
for fn in ['raw_start_time','raw_end_time','expanded_start_time','expanded_end_time']:
saccades[fn]=np.array(interpgaze.smpl_time.iloc[np.array(saccades[fn])])
return saccades
#%% SACCADE DETECTION ALGORITHM
def apply_engbert_mergenthaler(xy_data = None, is_blink = None, vel_data = None, l = 5, sample_rate=None, minimum_saccade_duration = 0.0075):
"""Uses the engbert & mergenthaler algorithm (PNAS 2006) to detect saccades.
This function expects a sequence (N x 2) of xy gaze position or velocity data.
Arguments:
xy_data (numpy.ndarray, optional): a sequence (N x 2) of xy gaze (float/integer) positions. Defaults to None
vel_data (numpy.ndarray, optional): a sequence (N x 2) of velocity data (float/integer). Defaults to None.
l (float, optional): determines the threshold. Defaults to 5 median-based standard deviations from the median
sample_rate (float, optional): the rate at which eye movements were measured (samples per second). Defaults to 1000.0
minimum_saccade_duration (float, optional): the minimum duration for something to be considered a saccade. Defaults to 0.0075
Returns:
list of dictionaries, which each correspond to a saccade.
The dictionary contains the following items:
Raises:
ValueError: If neither xy_data and vel_data were passed to the function.
"""
# get a logger
logger = logging.getLogger(__name__)
logger.debug('Start.... Detecting Saccades')
# If xy_data and vel_data are both None, function can't continue
if xy_data is None and vel_data is None:
raise ValueError("Supply either xy_data or vel_data")
#If xy_data is given, process it
if not xy_data is None:
xy_data = np.array(xy_data)
if is_blink is None:
raise ValueError('you have to give me blink data!')
# Calculate velocity data if it has not been given to function
if vel_data is None:
# # Check for shape of xy_data. If x and y are ordered in columns, transpose array.
# # Should be 2 x N array to use np.diff namely (not Nx2)
# rows, cols = xy_data.shape
# if rows == 2:
# vel_data = np.diff(xy_data)
# if cols == 2:
# vel_data = np.diff(xy_data.T)
vel_data = np.zeros(xy_data.shape)
vel_data[1:] = np.diff(xy_data, axis = 0)
else:
vel_data = np.array(vel_data)
inspect_vel = pd.DataFrame(vel_data)
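# --- Hedged illustration (not part of the original module) ---
# Core idea of the Engbert & Mergenthaler detector used above: a velocity
# threshold of `l` median-based standard deviations per axis. This is a
# simplified sketch of the threshold computation only, not the full detector.
import numpy as np

def median_based_threshold(vel, l=5):
    # vel: (N, 2) array of x/y velocities; returns one threshold per axis
    med = np.nanmedian(vel, axis=0)
    msd = np.sqrt(np.nanmedian(vel ** 2, axis=0) - med ** 2)
    return l * msd

rng = np.random.default_rng(0)
demo_vel = rng.normal(scale=0.05, size=(1000, 2))  # made-up velocity trace
print(median_based_threshold(demo_vel))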
from wf_core_data_dashboard import core
import wf_core_data
import mefs_utils
import pandas as pd
import inflection
import urllib.parse
import os
def generate_mefs_table_data(
test_events_path,
student_info_path,
student_assignments_path
):
test_events = pd.read_pickle(test_events_path)
student_info = pd.read_pickle(student_info_path)
student_assignments = pd.read_pickle(student_assignments_path)
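# --- Hedged illustration (not part of the original module) ---
# The loader above simply unpickles three pre-built DataFrames; a minimal
# round trip with a hypothetical file name looks like this.
import pandas as pd

events = pd.DataFrame({"student_id": [1, 2], "score": [480.0, 505.0]})
events.to_pickle("test_events.pkl")        # hypothetical path
restored = pd.read_pickle("test_events.pkl")
assert restored.equals(events)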
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%%
from IPython import get_ipython
#%%
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import io
import base64
from matplotlib import animation
from matplotlib import cm
from matplotlib.pyplot import *
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from dateutil import parser
from IPython.display import HTML
from subprocess import check_output
import seaborn as sns
#%%
get_ipython().run_line_magic('matplotlib', 'inline')
#%%
plt.rcParams['patch.force_edgecolor'] = 'True'
plt.rcParams['figure.figsize'] = (16,10)
plt.rcParams['axes.unicode_minus'] = False
#%%
df_train = pd.read_csv('train.csv')
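#%%
# --- Hedged illustration (not part of the original notebook) ---
# Typical first look at the freshly loaded frame; the column layout of
# 'train.csv' is not shown here, so only generic inspection calls are used.
print(df_train.shape)
print(df_train.dtypes)
df_train.head()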
# Scrape Zhaopin (zhaopin.com) for all .NET-related job postings in the Wuhan area
import requests
from bs4 import BeautifulSoup
import pandas
pos=[]
headers={
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9,en;q=0.8',
'Connection':'keep-alive',
'Host':'sou.zhaopin.com',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}
for p in range(1,50):
url='http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E6%AD%A6%E6%B1%89&kw=java&sm=0&p='+str(p)
re=requests.get(url,headers=headers)
bs=BeautifulSoup(re.text,'lxml')
tb=bs.find_all('table',class_="newlist")
for x in tb:
res={}
zw=x.find('td',class_="zwmc")
if(zw):
res["Posion"]=zw.find("a").text
gs=x.find('td',class_="gsmc")
if(gs):
res["Company"]=gs.text
xs=x.find('td',class_="zwyx")
if(xs):
res["Salary"]=xs.text
if(len(res)>0):
pos.append(res)
pandas.DataFrame(pos)
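# --- Hedged illustration (not part of the original scraper) ---
# Turning the scraped records into a frame and saving it; the records and
# the output file name below are made up for the example.
import pandas

demo_pos = [
    {"Posion": "Java Developer", "Company": "Example Co.", "Salary": "8001-10000"},
    {"Posion": ".NET Engineer", "Company": "Example Co.", "Salary": "10001-15000"},
]
df_jobs = pandas.DataFrame(demo_pos)
df_jobs.to_csv("zhaopin_jobs.csv", index=False, encoding="utf-8-sig")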
import pandas as pd
import pdb
import openpyxl
import html
import sys
import datetime
def main():
data = pd.read_excel('cancer_genes_with_pcgp_210921.xlsx',sheet_name='final',index_col=0)
# check args
try:
if sys.argv[1] == 'pcgp':
filterField = 'PCGP category (PMID 26580448)'
directory = 'byPCGP'
panelBuckets = getPCGP(data)
elif sys.argv[1] == 'mardis':
filterField = 'Preferred list (Mardis)'
directory = 'byMardis'
panelBuckets = getMardis(data)
elif sys.argv[1] == 'cgc':
filterField = 'CGC list'
directory = 'byCGC'
panelBuckets = getCGC(data)
elif sys.argv[1] == 'panel':
filterField = 'Panel'
directory = 'byPanel'
panelBuckets = getPanels(data)
else:
print('Invalid argument')
print('Defaulting to PCGP')
filterField = 'PCGP category (PMID 26580448)'
directory = 'byPCGP'
panelBuckets = getPCGP(data)
except:
filterField = 'PCGP category (PMID 26580448)'
directory = 'byPCGP'
panelBuckets = getPCGP(data)
# Write individual buckets out to file for debugging purposes
df3 = pd.DataFrame()
df3 = df3.assign(buckets=panelBuckets)
writer = pd.ExcelWriter('buckets.xlsx')
df3.to_excel(writer,sheet_name='buckets')
writer.save()
# For each individual case, just set the bucket ID as if it ignored the comma split? Probably the quickest way to fix
for i in panelBuckets:
fix = fixThePanel(i)
writeTSV(data,fix,filterField,directory)
def fixThePanel(bucket):
# From excel file, some buckets are messing up due to extra commas. This is a hard coded fix (hopefully a fix)
if bucket == 'CGC Genetics USA OncoRisk Expanded (NGS panel for 89 genes' or bucket == 'including CNV analysis)':
fixedBucket = 'CGC Genetics USA OncoRisk Expanded (NGS panel for 89 genes, including CNV analysis)'
elif bucket == 'Phosphorous' or bucket == 'Inc. Brain and Nervous System Cancer Panel':
fixedBucket = 'Phosphorous, Inc. Brain and Nervous System Cancer Panel'
elif bucket == 'CGC Genetics USA OncoRisk (NGS panel for 48 genes':
fixedBucket = 'CGC Genetics USA OncoRisk (NGS panel for 48 genes, including CNV analysis)'
elif bucket == 'and PNS Cancer: Deletion/Duplication Panel':
fixedBucket = 'EGL Genetics Brain, CNS, and PNS Cancer: Deletion/Duplication Panel'
elif bucket == 'and PNS Cancer Panel: Sequencing and CNV Analysis':
fixedBucket = 'EGL Genetics Brain, CNS, and PNS Cancer Panel: Sequencing and CNV Analysis'
elif bucket == 'Mayo Clinic Laboratories Neuro-Oncology Expanded Gene Panel with Rearrangement':
fixedBucket = 'Mayo Clinic Laboratories Neuro-Oncology Expanded Gene Panel with Rearrangement Tumor'
else:
fixedBucket = bucket
return(fixedBucket)
def getPanels(df):
crosscheck = list()
for i in df['Panel']:
if type(i)==float:
print('FLOAT')
pass
else:
temp = i.split(', ')
for j in temp:
if j in crosscheck:
print('Already there')
else:
crosscheck.append(j)
return(crosscheck)
def getCGC(df):
# Do the same thing as getPanels, but for the CGC List
crosscheck = list()
for i in df['CGC list']:
if type(i)==float:
print('FLOAT')
pass
else:
temp = i.split(', ')
for j in temp:
if j in crosscheck:
print('Already there')
else:
crosscheck.append(j)
return(crosscheck)
def getPCGP(df):
# Do the same thing as getPanels, but for the PCGP List
crosscheck = list()
for i in df['PCGP category (PMID 26580448)']:
if type(i)==float:
print('FLOAT')
pass
else:
temp = i.split(';')
for j in temp:
if j in crosscheck:
print('Already there')
else:
crosscheck.append(j)
return(crosscheck)
def getMardis(df):
# Do the same thing as getPanels, but for the Mardis Preferred List
crosscheck = list()
for i in df['Preferred list (Mardis)']:
if type(i)==float:
print('FLOAT')
pass
else:
temp = i.split(', ')
for j in temp:
if j in crosscheck:
print('Already there')
else:
crosscheck.append(j)
return(crosscheck)
def writeTSV(df,bucket,filter_field,directory):
# Write function for saving split results. Change filename or whatever accordingly
filename = bucket.replace('/',' ')
filename = filename.replace('(',' ')
if bucket == 'EGL Genetics Brain' or bucket == 'CNS' or bucket == 'Tumor' or bucket == '':
return
try:
df2 = df[df[filter_field].str.contains(bucket)==True]
except:
print('Something went wrong sorting by bucket')
pdb.set_trace()
# Initialize excel file
try:
writer = pd.ExcelWriter(directory + '/' + filename + '.xlsx')
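# --- Hedged illustration (not part of the original script) ---
# The core pattern used by writeTSV above: keep rows whose panel column
# contains a given bucket string, then dump them to one Excel file per
# bucket. Column and file names here are assumptions for the example.
import pandas as pd

demo = pd.DataFrame({
    "gene": ["TP53", "EGFR", "ALK"],
    "Panel": ["Panel A, Panel B", "Panel B", "Panel C"],
})
bucket = "Panel B"
subset = demo[demo["Panel"].str.contains(bucket, regex=False) == True]
with pd.ExcelWriter("demo_bucket.xlsx") as demo_writer:
    subset.to_excel(demo_writer, sheet_name="bucket")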
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
# TODO: datedate.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
assert as_json_table_type(t) == 'duration'
def test_as_json_table_type_string_dtypes(self):
strings = [object] # TODO
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_dtypes(self):
# TODO: I think before is_categorical_dtype(Categorical)
# returned True, but now it's False. Figure out why or
# if it matters
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(CategoricalDtype()) == 'any'
class TestTableOrient(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])),
'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True)),
'G': [1., 2., 3, 4.],
'H': pd.date_range('2016-01-01', freq='d', periods=4,
tz='US/Central'),
},
index=pd.Index(range(4), name='idx'))
def test_build_series(self):
s = pd.Series([1, 2], name='a')
s.index.name = 'id'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
{'name': 'a', 'type': 'integer'}]
schema = {
'fields': fields,
'primaryKey': ['id'],
}
expected = OrderedDict([
('schema', schema),
('data', [OrderedDict([('id', 0), ('a', 1)]),
OrderedDict([('id', 1), ('a', 2)])])])
assert result == expected
def test_to_json(self):
df = self.df.copy()
df.index.name = 'idx'
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'E',
'ordered': False,
'type': 'any'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'F',
'ordered': True,
'type': 'any'},
{'name': 'G', 'type': 'number'},
{'name': 'H', 'type': 'datetime', 'tz': 'US/Central'}
]
schema = {
'fields': fields,
'primaryKey': ['idx'],
}
data = [
OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
('C', '2016-01-01T00:00:00.000Z'),
('D', 'P0DT1H0M0S'),
('E', 'a'), ('F', 'a'), ('G', 1.),
('H', '2016-01-01T06:00:00.000Z')
]),
OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
('C', '2016-01-02T00:00:00.000Z'),
('D', 'P0DT1H1M0S'),
('E', 'b'), ('F', 'b'), ('G', 2.),
('H', '2016-01-02T06:00:00.000Z')
]),
OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
('C', '2016-01-03T00:00:00.000Z'),
('D', 'P0DT1H2M0S'),
('E', 'c'), ('F', 'c'), ('G', 3.),
('H', '2016-01-03T06:00:00.000Z')
]),
OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
('C', '2016-01-04T00:00:00.000Z'),
('D', 'P0DT1H3M0S'),
('E', 'c'), ('F', 'c'), ('G', 4.),
('H', '2016-01-04T06:00:00.000Z')
]),
]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1., 2.])
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
('data', [OrderedDict([('index', 1.0), ('values', 1)]),
OrderedDict([('index', 2.0), ('values', 1)])])])
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range('2016', freq='Q-JAN', periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'},
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
('values', 1)]),
OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
('values', 1)])]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(['a', 'b']))
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema',
{'fields': [{'name': 'index', 'type': 'any',
'constraints': {'enum': ['a', 'b']},
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
('data', [
OrderedDict([('index', 'a'),
('values', 1)]),
OrderedDict([('index', 'b'), ('values', 1)])])])
)
assert result == expected
def test_date_format_raises(self):
with pytest.raises(ValueError):
self.df.to_json(orient='table', date_format='epoch')
# others work
self.df.to_json(orient='table', date_format='iso')
self.df.to_json(orient='table')
def test_make_field_int(self):
data = [1, 2, 3]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'integer'}
assert result == expected
def test_make_field_float(self):
data = [1., 2., 3.]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'number'}
assert result == expected
def test_make_field_datetime(self):
data = [1., 2., 3.]
kinds = [pd.Series(pd.to_datetime(data), name='values'),
pd.to_datetime(data)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime'}
assert result == expected
kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'),
pd.to_datetime(data, utc=True)]
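# --- Hedged illustration (not part of the original test module) ---
# What the table-schema orient produces for a small frame; the exact
# "pandas_version" entry depends on the installed pandas.
import json
import pandas as pd

small = pd.DataFrame({"a": [1, 2]}, index=pd.Index([0, 1], name="idx"))
payload = json.loads(small.to_json(orient="table"))
print(payload["schema"]["primaryKey"])                 # ['idx']
print([f["name"] for f in payload["schema"]["fields"]])  # ['idx', 'a']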
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commisionning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory sample given a train dataset of Kaggle for competition and a sample size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
# Sampling size (% of the train population) is taken from the samplingSize argument
# Build a Sampling dataset | Phase 1: Determine: (1) the source path of the train data; (2) the location path of the sampling
import os
import pandas as pd
path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
# Build a Sampling dataset | Phase 2: Build dataset using the following features from train data: (1) ID; (2) # of DICOM files per ID (including percentage).
## Improvement: (3) # of other registers (not related to DICOM files)
os.chdir(path_source)
ID_list = os.listdir(path_source)
ID_list_range = len(ID_list)
DICOMFile_list = []
DICOMFileNumber_list = []
for i in range(0,ID_list_range):
path_ID = path_source + ID_list[i] + '/'
DICOMFile_list_unitary = os.listdir(path_ID)
DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
DICOMFileNumber_list_unitary = len(DICOMFile_list_unitary)
DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]
Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
Population_DataFrame = pd.DataFrame(data = Population_Dictionary)
DICOMFilePercentage_list = []
TotalNumberDicomFiles = sum(Population_DataFrame.NumberDicomFiles)
for j in range(0,ID_list_range):
Percentage = Population_DataFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
Percentage = round(Percentage,6)
DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]
Population_Percentage_Dictionary = {'Percentage':DICOMFilePercentage_list}
Population_Percentage_DataFrame = pd.DataFrame(data=Population_Percentage_Dictionary)
Population_DataFrame = pd.concat([Population_DataFrame, Population_Percentage_DataFrame],axis=1, sort=False)
filename_population = 'populationDataset.csv'
path_population = path_destination_outcome
Population_DataFrame.to_csv(path_population+filename_population)
# Build a Sampling dataset | Phase 3: Get an aleatory grouping of IDs (just tags)
import random
Population_DataFrame_IndexToSample=[]
Population_DataFrame_IDToSample=[]
Population_DataFrame_PercentageToSample=[]
samplingSizeGoal = 0
while (samplingSizeGoal <= samplingSize):
randomNumberTermination = len(Population_DataFrame.ID)
randomNumber = random.randrange(0,randomNumberTermination,1)
if (randomNumber not in Population_DataFrame_IndexToSample):
Population_DataFrame_IndexToSample = Population_DataFrame_IndexToSample + [randomNumber]
ID_unitary = Population_DataFrame.ID[randomNumber]
Population_DataFrame_IDToSample = Population_DataFrame_IDToSample + [ID_unitary]
Percentage_unitary = Population_DataFrame.Percentage[randomNumber]
Population_DataFrame_PercentageToSample = Population_DataFrame_PercentageToSample + [Percentage_unitary]
samplingSize_unitary = Population_DataFrame.Percentage[randomNumber]
samplingSizeGoal = samplingSizeGoal + samplingSize_unitary
samplingDataset_Dictionary = {'Index':Population_DataFrame_IndexToSample,'ID':Population_DataFrame_IDToSample,'Percentage':Population_DataFrame_PercentageToSample}
samplingDataset_DataFrame = pd.DataFrame(data=samplingDataset_Dictionary)
filename_sampling = 'samplingDataset.csv'
path_sampling = path_destination_outcome
samplingDataset_DataFrame.to_csv(path_sampling+filename_sampling)
# Build a Sampling dataset | Phase 3: Get train dataset (an aleatory grouping of IDs; tree-copy task)
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination)
create_tree(path_destination,[])
if testMode == True:
print("=========================================")
print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
print("=========================================")
for k in Population_DataFrame_IDToSample:
path_source_unitary = path_source + k + '/'
path_destination_unitary = path_destination + k + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",k)
# Build a Sampling dataset | Phase 4: Get test dataset (tree-copy task)
## Assumption: The complete test dataset is copied.
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination_test)
create_tree(path_destination_test,[])
if testMode == True:
print("=========================================")
print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
print("=========================================")
IDList_test = os.listdir(path_source_test)
for l in IDList_test:
path_source_unitary = path_source + l + '/'
path_destination_unitary = path_destination_test + l + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",l)
if (testMode == False and reportMode == True):
from datetime import date
reportDate = date.today()
print("=========================================")
print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
print("=========================================")
print("Function: trainDatasetSampler(samplingSize,testMode)")
print("=========================================")
print("(1) Inputs")
print("=========================================")
print("-Sampling Size :", samplingSize, "%")
print("-Test Mode : False")
print("=========================================")
print("(2) Outputs")
print("=========================================")
print("-Type of sample: Aleatory based on IDs")
print("-Train dataset percentage to sample (base): ", round(abs(samplingSize),6),"%")
print("-Train dataset percentage to sample (adjustment): ", round(abs(samplingSizeGoal-samplingSize),6),"%")
print("-Train dataset percentage to sample (fitted): ", round(samplingSizeGoal,6),"%")
print("-Population of Train dataset (just information) available in file: ", filename_population)
print("-Sample of Train dataset (just information) available in file: ", filename_sampling)
print("=========================================")
print("(2) Outcomes:")
print("(3) Outcomes:")
print("Being the outcome expressed under the variable result, outcomes are as follows:")
print("result[0] -> Dataframe for Population")
print("result[1] -> Dataframe for Sample")
print("result[2] -> Test Mode")
print("result[3] -> Report Mode")
print("=========================================")
return Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode
if testMode == True:
samplingSize = 5
resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[0])
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[1])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filename,testMode):
import os
import pandas as pd
# Set ProductType
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# Set productType and splitType
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set outcome
path_outcome = path_ProductType + 'outcome/'
# Get raw data as a DataFrame
os.chdir(path_outcome)
rawFile_DataFrame = pd.read_csv('submissionRawFile_2020_09_19.csv')
# Get submission file template as a DataFrame
os.chdir(path_ProductType)
submissionFile_DataFrame = pd.read_csv('sample_submission.csv')
# Get submission data as required in submission file
submissionNumber_range = len(rawFile_DataFrame.index)
IDcases_List = submissionFile_DataFrame.Patient_Week.copy()
IDcases_List = IDcases_List[0:5]
IDcases_List_range = len(IDcases_List)
for i in range (0,IDcases_List_range):
IDcases_List[i] = IDcases_List[i][:-4]
# Get submission data as required in submission file | FVC
FVCDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_FVC')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
FVCDataList = FVCDataList + [datum]
submissionFile_DataFrame['FVC'] = FVCDataList
# Get submission data as required in submission file | Confidence
CONDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_CON')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
CONDataList = CONDataList + [datum]
submissionFile_DataFrame['Confidence'] = CONDataList
# Save file | Get directory
path_destination = path_outcome+'submissions/'
try:
os.chdir(path_destination)
GetCreation = True
except FileNotFoundError:
GetCreation = False
if GetCreation == False:
from distutils.dir_util import mkpath
mkpath(path_destination)
os.chdir(path_destination)
submissionList = os.listdir(path_destination)
number = len(submissionList)
filename = 'submission_'+str(number+1)+'.csv'
submissionFile_DataFrame.to_csv(filename, index=False)
return submissionFile_DataFrame, filename, testMode
if testMode == True:
ProductType = 'population'
filename = 'submissionRawFile_2020_09_19.csv'
resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
print("=========================================")
print("Product Type:")
print("=========================================")
print(ProductType)
print("=========================================")
print("Submission File saved as:")
print("=========================================")
print(resultFunction2[1])
print("=========================================")
print("Test result Function 2: Success")
print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with a stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType)
# Get train dataset and test dataset
import pandas as pd
filename_trainDataset = 'train.csv'
train_dataset = pd.read_csv(path_ProductType+filename_trainDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission dataset (template)
import numpy as np
path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
if (PydicomMode == False):
filename_submissionDataset = 'submissionInputDataset.csv'
else:
filename_submissionDataset = 'submissionInputDataset_pydicom.csv'
submission_dataset = pd.read_csv(path_resources+filename_submissionDataset)
submission_dataset = submission_dataset.replace(np.nan,'iNaN')
# Adjust train dataset | Phase 1: Get ID list of the test dataset
IDList = list(test_dataset.Patient)
# Adjust train dataset | Phase 2: Get submission instances from train dataset
instancesPopulation = len(train_dataset.Patient)
indexList = []
for i in IDList:
for j in range(0,instancesPopulation):
if i == train_dataset.Patient[j]:
indexToInclude = train_dataset.index[j]
indexList = indexList + [indexToInclude]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | a. Remove test instances from train dataset and reset index
train_dataset_adjusted = train_dataset.drop(indexList)
train_dataset_adjusted.reset_index
# Adjust train dataset | Phase 3: Create an adjusted train dataset | b. Get Transferring data from train dataset
instanceToTrasferList_index = []
for k in range(0,instancesPopulation):
for l in IDList:
if train_dataset.Patient[k] == l:
instanceToTransfer_Index = train_dataset.index[k]
instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
train_dataset_instancesToTransfer.index
train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index()
train_dataset_instancesToTransfer.drop(columns='index')
# Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
submission_dataset_range = len(submission_dataset.Patient)
train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
Patient_List = []
Week_List = []
FVC_List = []
Percent_List = []
Age_List = []
Sex_List = []
SmokingStatus_List = []
for m in range (0,submission_dataset_range):
timesCopy = 0
if(submission_dataset.Patient[m] in IDList):
referenceWeek = submission_dataset.Weeks[m]
for n in range (0,train_dataset_instancesToTransfer_range):
if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
if (timesCopy == 0):
submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
timesCopy = timesCopy + 1
else:
# Additional instances to include
Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
submission_dataset_range = len(submission_dataset.Patient)
for o in range(0,submission_dataset_range):
if(submission_dataset.Patient[o] in IDList):
for p in range(0,train_dataset_instancesToTransfer_range):
if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
# Scenario to replace NaN values: Average FVC for a given Patient
averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
submission_dataset.FVC[o] = averageFVC
# Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
additionalDictionary = {submission_dataset.columns[0]:Patient_List,
submission_dataset.columns[1]:Week_List,
submission_dataset.columns[2]:FVC_List,
submission_dataset.columns[3]:Percent_List,
submission_dataset.columns[4]:Age_List,
submission_dataset.columns[5]:Sex_List,
submission_dataset.columns[6]:SmokingStatus_List}
additional_dataset = pd.DataFrame(data=additionalDictionary)
frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
train_dataset_adjusted = pd.concat(frames)
train_dataset_adjusted = train_dataset_adjusted.reset_index()
train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
# Adjust train dataset with pydicom train dataset) | Phase 1: Get pydicom train dataset
if(PydicomMode == True):
filename_pydicom = 'train_pydicom.csv'
path_ProductType_pydicom = path_ProductType + 'outcome/'
train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
# Adjust train dataset with pydicom train dataset) | Phase 2: Include values from train_adjusted_pydicom.py into adjusted train dataset
if(PydicomMode == True):
instancesToInclude_List = list(train_dataset_pydicom.Patient)
InstanceToInclude_Patient = i
newIndex = len(train_dataset_adjusted.Patient)
for i in instancesToInclude_List:
# Get instance to transfer
InstanceToInclude_Patient = i
InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
# Put instance into train_dataset_adjusted DataFrame
if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
# Get index
indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
# Complete instance
train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
else:
# Add new instance
## Get repeatable instances
repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
## Get Dictionary
DictionaryToInclude = {}
DictionaryToInclude['Patient'] = InstanceToInclude_Patient
DictionaryToInclude['Weeks'] = InstanceToInclude_Week
DictionaryToInclude['FVC'] = repeatableInstance1
DictionaryToInclude['Percent'] = repeatableInstance2
DictionaryToInclude['Age'] = repeatableInstance3
DictionaryToInclude['Sex'] = repeatableInstance4
DictionaryToInclude['SmokingStatus'] = repeatableInstance5
DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
## Get DataFrame
DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
newIndex = newIndex + 1
## Concatenate DataFrame
train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
# nan filling
train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
# Specifying dtype
train_dataset_adjusted.astype({'Patient': 'O'}).dtypes
train_dataset_adjusted.astype({'Weeks': 'float64'}).dtypes
train_dataset_adjusted.astype({'Percent': 'float64'}).dtypes
train_dataset_adjusted.astype({'Age': 'float64'}).dtypes
train_dataset_adjusted.astype({'Sex': 'O'}).dtypes
train_dataset_adjusted.astype({'SmokingStatus': 'O'}).dtypes
train_dataset_adjusted.astype({'FVC': 'float64'}).dtypes
if(PydicomMode == True):
train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'indexType1_Inhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'ImageType': 'O'}).dtypes
# Get CSV file
path_output = path_ProductType +'outcome/'
if(PydicomMode == False):
filename_output = 'train_adjusted.csv'
else:
filename_output = 'train_adjusted_pydicom.csv'
train_dataset_adjusted.to_csv(path_output+filename_output)
# Function Result
resultFunction = train_dataset_adjusted,path_output,filename_output
# Report Mode
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction[0])
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction[1])
print("=========================================")
print("Input File saved as:", resultFunction[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
return resultFunction
if testMode == True:
ProductType = 'prototype'
PydicomMode = True
reportMode = False
resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction3[0])
print("=========================================")
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction3[1])
print("=========================================")
print("Input File saved as:", resultFunction3[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction3[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been computed
considering the following data-fitting scope: (1) Data: FVC predictions; (2) Probability density function (statistical function
available in scipy as scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType + 'outcome/')
# Get result data and test dataset
import pandas as pd
if(pydicomMode == True):
filename_resultDataset = 'result_pydicom.csv'
else:
filename_resultDataset = 'result.csv'
result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission instances | Phase 1: Index
IDList = list(test_dataset.Patient)
IDList_index_dictionary = {}
for i in IDList:
itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
IDList_index_dictionary[i] = itemToInclude
# Get submission instances | Phase 2: Extract submission instances from result dataset
IDList_index = []
IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
submission_dataset = result_dataset.loc[IDList_index]
# Get submission instances | Phase 3: Extract duplicated instances
submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
# Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
submission_dataset = submission_dataset.reset_index()
submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
# Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
if (shapeParameter_DataFrame == []):
shapeParameter_dictionary = {}
shapeParameter = 0.126074
MLModelList = IDList_columns[2:]
for l in MLModelList:
keyShapeParameter = 'c Parameter_'+l
shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
# Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
columnLabels = list(standardDeviationClipped_DataFrame.columns)
columnLabels_SDC_dictionary = {}
for i in columnLabels:
columnLabels_item ='SD_Clipped'+i[11:]
columnLabels_SDC_dictionary[i]=columnLabels_item
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
import numpy as np
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
ID_List = list(standardDeviationClipped_DataFrame.index)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
CParameter_List = list(shapeParameter_DataFrame.columns)
numy = 0
from scipy.stats import loglaplace
for j in ID_List:
for k in SDModel_List:
itemToInclude = CParameter_List[numy]
c = shapeParameter_DataFrame[itemToInclude][j]
sd_LL = loglaplace.std(c, loc=0, scale=100)
standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
numy = numy + 1
numy = 0
# Get confidence measure | Phase 3: Get metric axe per model: |FVC_true - FVC_predicted|
## Metric - Part 1: |FVC_true - FVC_pred|
if(pydicomMode == True):
variableNumber = 10
else:
variableNumber = 7
MLModelList = list(submission_dataset.columns[variableNumber:])
metric_dictionary = {}
for j in MLModelList:
metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
metric_differential = list(metric_differential)
keyToInclude = 'metric_'+j
metric_dictionary[keyToInclude] = metric_differential
metric_DataFrame = pd.DataFrame(data=metric_dictionary)
# Get confidence measure | Phase 4: Get metric axe per model: min(|FVC_true - FVC_predicted|, 1000)
## metric per instance
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
metricLabels = list(metric_DataFrame.columns)
instancesNumber = len(submission_dataset.index)
for i in metricLabels:
j = 0
while (j<instancesNumber):
metric_DataFrame[i][j] = min(metric_DataFrame[i][j],1000)
j = j+1
submission_dataset = submission_dataset.join(metric_DataFrame)
# Get confidence measure | Phase 5: Get metric axe per model: (-1 * differential * 2^0.5 / SDC ) - ln(2^0.5 * SCD)
## metric per instance
## differential = min(|FVC_true - FVC_predicted|, 1000)
## SDC: Standard Deviation Clipped
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
IDList = list(test_dataset.Patient)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
SDModel_index_List = list(standardDeviationClipped_DataFrame.index)
metric_lists = list(metric_DataFrame.columns)
metric_index_lists = list(metric_DataFrame.index)
submission_dataset_index_List = list(submission_dataset.index)
instancesNumber = len(submission_dataset_index_List)
indexPerID_dictionary = {}
### Step 1: Get index per ID to compute
for i in IDList:
listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
indexPerID_dictionary[i] = listToInclude
indexPerID_DataFrame = pd.DataFrame(data=indexPerID_dictionary)
### Step 3: Compute metric
import math
from math import log1p
for k in IDList:
for i in metric_lists:
for j in list(indexPerID_DataFrame[k]):
differential = submission_dataset[i][j]
SDC_Label = 'SD_Clipped_' + i[7:]
SDC = standardDeviationClipped_DataFrame[SDC_Label][k]
metric_part1 = -1* 2**0.5 * differential / SDC
metric_part2 = -1 * math.log1p(2**0.5 * SDC)
metric = metric_part1 + metric_part2
submission_dataset[i][j] = metric
# Result function specification
resultFunction = submission_dataset,shapeParameter_DataFrame,standardDeviationClipped_DataFrame
# Get submission files | Phase 1: Get submission file template
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
## Get submission files | Phase 2: Create directory
try:
path_output = path_ProductType + 'submission/'
os.chdir(path_output)
except FileNotFoundError:
import distutils.dir_util
path_output = path_ProductType + 'submission/'
distutils.dir_util.mkpath(path_output)
## Get submission files | Phase 3: Get correlative
files_list = os.listdir(path_output)
try:
maxNumber = max(files_list)
maxNumber = maxNumber[:-4]
maxNumber = int(maxNumber)
nextNumber = maxNumber+1
except ValueError:
nextNumber = 0
## Get submission files | Phase 4: Get models to include and their corresponding metrics
ModelToInclude = IDList_columns[2:]
## Get submission files | Phase 5: Build Files
for i in ModelToInclude:
filename = 'sample_submission.csv'
submissionFile = | pd.read_csv(path_ProductType+filename) | pandas.read_csv |
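# --- Illustrative aside (not part of the snippet above) --------------------------------------
# The confidence measure assembled in Phases 3-5 is essentially a Laplace log-likelihood with
# clipped inputs. A minimal vectorized sketch follows; the array names are hypothetical.
# Note: the loop above calls math.log1p (ln(1+x)), while the formula quoted in the Phase 5
# comment corresponds to a plain natural log, which is what this sketch uses.
import numpy as np

def laplace_log_likelihood(fvc_true, fvc_pred, sigma):
    sigma_clipped = np.maximum(sigma, 70.0)                    # clip sigma from below at 70
    delta = np.minimum(np.abs(fvc_true - fvc_pred), 1000.0)    # clip the absolute error at 1000
    return np.mean(-np.sqrt(2.0) * delta / sigma_clipped
                   - np.log(np.sqrt(2.0) * sigma_clipped))

# toy check with made-up values
print(laplace_log_likelihood(np.array([2800.0, 3000.0]),
                             np.array([2750.0, 3100.0]),
                             np.array([120.0, 60.0])))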
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'),
pd.NaT, Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# different tz coerces tz-naive to tz-aware
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# tz mismatch affecting tz-aware raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaises(TypeError):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, | Index(rng.asi8) | pandas.Index |
import streamlit as st
import pandas as pd
import numpy as np
import datetime
import plotly.express as px
import base64
def app():
LOGO_IMAGE_IBM = "apps/ibm.png"
LOGO_IMAGE_U_OF_F = "apps/u_of_f.svg.png"
LOGO_IMAGE_BRIGHTER = "apps/brighter_potential_logo.png"
st.markdown(
"""
<style>
.container {
display: flex;
}
.logo-text {
font-weight:700 !important;
font-size:50px !important;
color: #f9a01b !important;
padding-top: 75px !important;
}
.logo-img {
float: left;
position: relative;
margin-top: 600px;
}
#logo {
position: absolute;
float: right;
}
</style>
""",
unsafe_allow_html=True
)
st.markdown(
f"""
<img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_IBM, "rb").read()).decode()}" width="100x`" height="40" style="border:20px;margin:0px" />
<img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_U_OF_F, "rb").read()).decode()}" width="200" height="40" style="border:20px;margin:0px"/>
              
          
<img class="logo" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_BRIGHTER, "rb").read()).decode()}" width="100" height="100" />
""",
unsafe_allow_html=True
)
st.markdown('---')
st.header("Solar Rooftop Potential Prediction")
# Sidebar
st.sidebar.header('Choose Time Range to View:')
min_date = st.sidebar.date_input('Min Date', datetime.datetime(2019, 1, 1))
min_date = min_date.strftime('%m/%d/%y')
max_date = st.sidebar.date_input('Max Date', datetime.datetime(2019, 12, 31))
max_date = max_date.strftime('%m/%d/%y')
st.sidebar.header('Choose Zipcode to View:')
# Declare zipcode list
zipcodes = [33131,33040,34112,33916,33407,33935,33471,33950,
34266,34994,34972,34236,34950,34205,33873,32960,33830,33606,33755,34741,33525,32806,34601,
32796,33513,32778,32771,34453,32720,34471,32621,32110,32601,32177,32456,32080,32091,32054,
32066,32347,32401,32327,
32025,32064,32063,32202,32502,32503,32424,32321,32304,32340,32344,
32351,32570,32034,32433,32536,32428,32448,32425,32602,32603,32604,32605,32606,
32607,32608,32609,32610,32611,32612,32614,32627,32641,32653,32402,32404,32405,32406,
32412,32073,32081,32099,32201,32203,32204,32205,32206,32207,32208,32209,
32210,32211,32212,32214,32216,32217,32218,32219,32220,32221,32222,32223,32224,
32225,32226,32227,32228,32229,32233,32234,32235,32236,32237,32238,32239,
32241,32244,32245,32246,32247,32250,32254,32255,32256,32257,32258,32266,32277,
32501,32504,32505,32514,32520,32522,32523,32524,32591,33601,33602,33603,33604,
33605,33607,33608,33609,33610,33611,33612,33613,33615,33616,33617,33619,
33620,33621,33622,33623,33629,33630,33631,33633,33634,33637,33646,33647,33650,33655,33660,33661,
33662,33664,33672,33673,33674,33675,33677,33679,33680,33681,33686,33901,
33902,33903,33905,33906,33907,33911,33912,33913,33917,33919,33966,33971,33990,32301,32302,32303,32305,32306,
32307,32308,32309,32310,32311,32312,32313,32314,32316,32317,32395,32399,
33101,33109,33111,33114,33125,33126,33127,33128,33129,33130,33132,33133,33134,33135,33136,33137,33138,
33139,33140,33142,33144,33145,33146,33147,33149,33150,33151,33159,33222,33233,33234,
33238,33242,33245,33255,32789,32801,32802,32803,32804,32805,32807,32808,32809,
32810,32811,32812,32814,32819,32822,32824,32827,32829,32832,32834,
32835,32839,32853,32854,32855,32856,32861,32862,32878,32885,32886,
32891,33401,33402,33403,33405,33409,33411,33412,33417,33756,33757,33758,
33759,33761,33763,33764,33765,33766,33767,33769,33302,
33303,33304,33305,33306,33307,33308,33309,33311,33312,33315,33316,33334,33338,33339,33348,
33394
]
# Put client and date options in the sidebar
selected_zip = st.sidebar.selectbox(
'Choose Zipcode:',
zipcodes,
key='zipcodes'
)
st.markdown("""
* Renewables currently account for roughly only 4% of energy production in Florida.
* Stakeholders need to know how solar energy sources can supplement the power grid.
* The sunburst chart below shows the daily potential of energy demand that could be supplied by rooftop solar energy for 2019.
* This projection for 2019 is based on predictive modeling that predicts the daily rooftop solar energy potential and the energy demand based on the weather.
""")
# area_stats = pd.read_csv('data/RPMSZips.csv', dtype={'zip':str})
area_stats = pd.read_csv('apps/florida_weather_w_predictions_and_zip_codes.csv', dtype={'zipcode':str})
#st.write(area_stats.head())
st.markdown("""Energy Demand vs. Solar Production Potential for 62 most populated Florida zip codes for all of 2019:""")
# get f'{value:,}'
florida_predicted_demand = round(area_stats['real_pred_demand_mwh'].sum())
florida_predicted_solar = round(area_stats['solar_prod_mwh'].sum())
col1, col2 = st.columns(2)
col1.metric(label="Predicted Demand (mwh)", value=f'{florida_predicted_demand:,}'
#, delta=100
)
col2.metric(label = "Solar Rooftop Potential (mwh)", value = f'{florida_predicted_solar:,}',
#delta = 50
)
# create a dataframe that gets masked based on min and max date selectors
area_stats['date_time'] = | pd.to_datetime(area_stats['date_time']) | pandas.to_datetime |
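# Continuation sketch (assumed, not from the original app): once 'date_time' is a datetime
# column, the sidebar's min/max dates and the selected zipcode can be applied as a boolean
# mask before plotting. Variable and column names follow the code above.
date_mask = ((area_stats['date_time'] >= pd.to_datetime(min_date)) &
             (area_stats['date_time'] <= pd.to_datetime(max_date)))
zip_stats = area_stats.loc[date_mask & (area_stats['zipcode'] == str(selected_zip))]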
# encoding=utf-8
""" gs_data centralizes all data import functions such as reading csv's
"""
import pandas as pd
import datetime as dt
from gs_datadict import *
def do_regions(ctydf: pd.DataFrame, mergef: str):
"""
do_regions assigns FIPS missing for multi-county regions, primarily occuring in UT where covid
data is rolled up into 6 multi-county regions. reading from a reference file, do_regions
assigns FIPS and cumulative population for the region, and identifies member counties
"""
namecol = ctydf.columns.get_loc('County')
state_idx = ctydf.loc[ctydf.State=='Utah'].index.to_list()
nonnull_idx = ctydf.loc[(ctydf.State=='Utah') & (ctydf.FIPS>'')].index.to_list()
# list comprehension finds rows which are missing a county FIPS
null_idx = [x for x in state_idx if x not in nonnull_idx]
merge_df = pd.read_csv(mergef, dtype={'fips0': str,'fips1': str,'fips2': str, 'fips3': str, 'fips4': str,'fips5': str})
# add a column for county population, we'll add region pop here
ctydf['Pop']= [None for x in range(len(ctydf))]
ctydf['Multi_FIPS']= [[] for x in range(len(ctydf))]
for x in null_idx:
this_region = ctydf.iat[x,1]
y = merge_df.loc[merge_df['Region']==this_region].to_dict('list')
ctydf.iat[x,0]= y['fips0'][0]
ctydf.iat[x,4]= y['Lat'][0]
ctydf.iat[x, 5] = y['Long'][0]
ctydf.iat[x, 9] = y['Long_Name'][0]
ctydf.iat[x, 11] = y['Pop'][0]
# make a list of county fips in the region, and add the list to the 'Multi_FIPS' column of the master df
z = [y['fips0'][0], y['fips1'][0]]
if pd.notnull(y['fips2'][0]):
z.append(y['fips2'][0])
if pd.notnull(y['fips3'][0]):
z.append(y['fips3'][0])
if pd.notnull(y['fips4'][0]):
z.append(y['fips4'][0])
if pd.notnull(y['fips5'][0]):
z.append(y['fips5'][0])
ctydf.iat[x, 12] = z
z = []
y = {}
# ALSO need to deal with Dukes and Nantucket region in MA:
y = {'UID':[84070002], 'Region':['Dukes and Nantucket'], 'stFIPS':[25], 'Lat':[41.40674725], 'Long':[-70.68763497],
'Long_Name':['Dukes-Nantucket Region MA'], 'Pop':[28731], 'fips0':['25007'], 'fips1':['25019'],
'name0':['Dukes'], 'name1':['Nantucket'], 'pop0':[17332], 'pop1':[11399]}
state_idx = ctydf.loc[ctydf.State=='Massachusetts'].index.to_list()
nonnull_idx = ctydf.loc[(ctydf.State=='Massachusetts')&(ctydf.FIPS>'')].index.to_list()
null_idx = [x for x in state_idx if x not in nonnull_idx]
x = null_idx[0]
this_region = ctydf.iat[x, 1]
ctydf.iat[x, 0] = y['fips0'][0]
ctydf.iat[x, 4] = y['Lat'][0]
ctydf.iat[x, 5] = y['Long'][0]
ctydf.iat[x, 9] = y['Long_Name'][0]
ctydf.iat[x, 11] = y['Pop'][0]
ctydf.iat[x, 12] = [y['fips0'][0], y['fips1'][0]]
# the final one is fixing Kansas City MO: not a rollup, but for some reason it sometimes appears as an outlier with no FIPS
MO_merge = {'region_pop': [459787],
'region_name': ['Kansas City MO'],
'prior_fips': [29000],
'prior_names': ['Kansas City Missouri']
}
return ctydf
def get_countypop(popfile: str, excludefile: str):
"""
get_statepop builds a Dataframe, indexed on State Fips, with est 2020 population
to avoid corrupting data, it then removes any counties which are part of a multi-county
rollup for covid reporting
"""
dfpop = pd.read_csv(popfile, usecols={0,3}, dtype={'FIPS': str,'Pop': int})
dfpop.set_index('FIPS', drop=True, inplace=True, verify_integrity=True) # df has only fips index and Pop field
dfpop.sort_index(inplace=True)
excl = pd.read_csv(excludefile, usecols={7,8,9,10,11,12}, dtype={'fips0': str, 'fips1': str, 'fips2': str,
'fips3': str, 'fips4': str, 'fips5': str})
for x in range(len(excl)):
dfpop.drop(index=excl.iat[x, 0], inplace=True)
dfpop.drop(index=excl.iat[x, 1], inplace=True)
if pd.notnull(excl.iat[x, 2]):
dfpop.drop(index=excl.iat[x, 2], inplace=True)
if pd.notnull(excl.iat[x, 3]):
dfpop.drop(index=excl.iat[x, 3], inplace=True)
if pd.notnull(excl.iat[x, 4]):
dfpop.drop(index=excl.iat[x, 4], inplace=True)
if | pd.notnull(excl.iat[x, 5]) | pandas.notnull |
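# Alternative sketch for the if-chain above (assumes the same `excl` and `dfpop` frames from
# get_countypop): gather every non-null fips value across the exclusion columns and drop them
# in one call. errors='ignore' makes it tolerant of codes already absent from the index.
fips_cols = ['fips0', 'fips1', 'fips2', 'fips3', 'fips4', 'fips5']
to_drop = excl[fips_cols].stack().unique().tolist()
dfpop = dfpop.drop(index=to_drop, errors='ignore')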
import collections
import logging
import multiprocessing
import os
import re
import warnings
import numpy as np
import pandas as pd
import tables
from trafficgraphnn.utils import (E1IterParseWrapper, E2IterParseWrapper,
TLSSwitchIterParseWrapper, _col_dtype_key,
col_type, pairwise_iterate)
_logger = logging.getLogger(__name__)
def write_hdf_for_sumo_network(sumo_network, multiprocess=True):
output_dir = os.path.join(os.path.dirname(sumo_network.netfile),
'output')
if multiprocess:
output_hdf = sumo_output_xmls_to_hdf_multiprocess(output_dir)
else:
output_hdf = sumo_output_xmls_to_hdf(output_dir)
light_switch_out_files = light_switch_out_files_for_sumo_network(
sumo_network)
assert len(light_switch_out_files) == 1 # more than one xml not supported yet
tls_output_xml_to_hdf(light_switch_out_files.pop())
return output_hdf
def light_switch_out_files_for_sumo_network(sumo_network):
light_switch_out_files = set()
for edge in sumo_network.graph.edges.data():
try:
light_switch_out_files.add(
os.path.join(os.path.dirname(sumo_network.netfile),
edge[-1]['tls_output_info']['dest']))
except KeyError:
continue
return light_switch_out_files
def _append_to_store(store, buffer, all_ids):
converter = {col: _col_dtype_key[col]
for col in buffer.keys()
if col in _col_dtype_key}
df = pd.DataFrame.from_dict(buffer)
df = df.astype(converter)
df = df.set_index('begin')
for i in all_ids:
# sub_df = df.loc[df['id'] == i]
# sub_df = sub_df.set_index('begin')
sub_df = df.query(f"id == '{i}'")
with warnings.catch_warnings():
warnings.simplefilter('ignore', tables.NaturalNameWarning)
store.append(f'raw_xml/{i}', sub_df)
assert len(store[f'raw_xml/{i}'].loc[0].shape) == 1, \
'id %s has len(store[id].loc[0].shape) = %g' % (i, len(store[f'raw_xml/{i}'].loc[0].shape))
def xml_to_df_hdf(parser,
store_filename,
hdf_file_mode='a',
complevel=7,
complib='zlib',
start_time=0,
end_time=np.inf,
buffer_size=1e5):
buffer = collections.defaultdict(list)
i = 0
all_ids = set()
with pd.HDFStore(store_filename, hdf_file_mode, complevel=complevel,
complib=complib) as store:
for _ in parser.iterate_until(start_time):
pass
for row in parser.iterate_until(end_time):
for k, v in row.items():
buffer[k].append(v)
all_ids.add(row.get('id'))
i += 1
if i >= buffer_size:
_append_to_store(store, buffer, all_ids)
buffer = collections.defaultdict(list)
i = 0
_append_to_store(store, buffer, all_ids)
def detector_output_xml_to_df(xml_filename):
basename = os.path.basename(xml_filename)
if '_e1_' in basename:
parser = E1IterParseWrapper(xml_filename, True)
elif '_e2_' in basename:
parser = E2IterParseWrapper(xml_filename, True)
else:
return
rows = collections.defaultdict(list)
for row in parser.iterate_until(np.inf):
for k, v in row.items():
rows[k].append(v)
id_set = set(rows['id'])
# sanity check there is only one detector
assert len(id_set) == 1
det_id = id_set.pop()
converter = {col: _col_dtype_key[col]
for col in rows.keys()
if col in _col_dtype_key}
df = (pd.DataFrame.from_dict(rows)
.astype(converter)
.drop(columns='id')
.set_index('begin'))
return {det_id: df}
def sumo_output_xmls_to_hdf_multiprocess(output_dir,
hdf_filename='raw_xml.hdf',
complevel=5,
complib='blosc:lz4',
num_workers=None,
remove_old_if_exists=True):
file_list = output_files_in_dir(output_dir)
output_filename = os.path.join(output_dir, hdf_filename)
if (remove_old_if_exists and os.path.exists(output_filename)
and os.path.isfile(output_filename)):
os.remove(output_filename)
_logger.debug('Removed file %s for new one', output_filename)
with multiprocessing.Pool(num_workers) as pool:
dfs = pool.map(detector_output_xml_to_df, file_list)
dfs = {k: v for d in dfs if d is not None for k, v in d.items()}
with pd.HDFStore(output_filename, complevel=complevel,
complib=complib) as store:
for det_id, df in dfs.items():
store.append('raw_xml/{}'.format(det_id), df)
return output_filename
def output_files_in_dir(output_dir):
file_list = [os.path.join(output_dir, f) for f in os.listdir(output_dir)]
file_list = [f for f in file_list
if os.path.isfile(f) and os.path.splitext(f)[-1] == '.xml']
return file_list
def sumo_output_xmls_to_hdf(output_dir,
hdf_filename='raw_xml.hdf',
complevel=5,
complib='blosc:lz4',
remove_old_if_exists=True):
file_list = output_files_in_dir(output_dir)
output_filename = os.path.join(output_dir, hdf_filename)
if (remove_old_if_exists and os.path.exists(output_filename)
and os.path.isfile(output_filename)):
os.remove(output_filename)
_logger.debug('Removed file %s for new one', output_filename)
for filename in file_list:
basename = os.path.basename(filename)
if '_e1_' in basename:
parser = E1IterParseWrapper(filename, True)
elif '_e2_' in basename:
parser = E2IterParseWrapper(filename, True)
else:
continue
xml_to_df_hdf(parser, output_filename, complevel=complevel,
complib=complib)
return output_filename
def tls_output_xml_to_hdf(xml_file,
hdf_filename='raw_xml.hdf',
complevel=5,
complib='blosc:lz4'):
df = light_timing_xml_to_phase_df(xml_file)
file_dir = os.path.dirname(xml_file)
hdf_filename = os.path.join(file_dir, hdf_filename)
with pd.HDFStore(hdf_filename, complevel=complevel,
complib=complib) as store:
store.append('raw_xml/tls_switch', df, append=False)
return df
def light_timing_xml_to_phase_df(xml_file):
parser = TLSSwitchIterParseWrapper(xml_file, True)
data = [dict(e.attrib) for e in parser.iterate_until(np.inf)]
df = | pd.DataFrame(data) | pandas.DataFrame |
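# Reading the per-detector frames back out of the store written above (illustrative sketch;
# the key layout 'raw_xml/<detector id>' mirrors what sumo_output_xmls_to_hdf and the
# multiprocess variant append).
import pandas as pd

def load_detector_frames(hdf_path):
    with pd.HDFStore(hdf_path, mode='r') as store:
        return {key.split('/')[-1]: store[key]
                for key in store.keys()
                if key.startswith('/raw_xml/')}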
from unittest import TestCase
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from scripts.utils import di
class TestDi(TestCase):
"""Test di() in utils.py"""
def test_di_nan_row(self):
"""Tests that correct distance is computed if NaNs occur in a row of a column"""
df = pd.DataFrame({"A": ["high", np.nan, "high", "low", "low", "high"], "B": [3, 2, 1, np.nan, 0.5, 2],
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
rule = pd.Series({"A": "high", "B": (1, 2), "C":(1, np.NaN), "Class": "banana"})
min_max = pd.DataFrame({"B": {"min": 1, "max": 5}})
correct = [pd.Series([1/4*1/4, 0.0, 0.0, 1.0, 1/8*1/8, 0.0], name="A"),
pd.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], name="A")]
j = 0
for i, col_name in enumerate(df):
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
dist = di(col, rule, min_max)
self.assertTrue(dist.equals(correct[j]))
j += 1
def test_di_nan_rule(self):
"""Tests that correct distance is computed if NaNs occur in a rule"""
df = pd.DataFrame({"A": ["high", np.nan, "high", "low", "low", "high"], "B": [3, 2, 1, np.nan, 1, 2],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
rule = pd.Series({"A": "high", "B": (np.NaN, 2), "Class": "banana"})
min_max = pd.DataFrame({"B": {"min": 1, "max": 2}})
correct = pd.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], name="A")
for i, col_name in enumerate(df):
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
dist = di(col, rule, min_max)
self.assertTrue(dist.equals(correct))
def test_di_single_feature(self):
"""Tests that correct distance is computed for 1 numeric feature"""
df = pd.DataFrame({"A": ["high", np.nan, "high", "low", "low", "high"], "B": [3, 2, 1, .5, 1, 2],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
rule = pd.Series({"A": "high", "B": (1, 2), "Class": "banana"})
dist = None
min_max = pd.DataFrame({"B": {"min": 1, "max": 5}})
correct = pd.Series({0: 0.25*0.25, 1: 0, 2: 0, 3: 0.125*0.125, 4: 0, 5: 0.0})
for i, col_name in enumerate(df):
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
dist = di(col, rule, min_max)
self.assertTrue(dist.equals(correct))
def test_di_multiple_features(self):
"""Tests that correct distance is computed for 2 numeric features"""
df = pd.DataFrame({"A": [1, 1, 4, 1.5, 0.5, 0.75], "B": [3, 2, 1, .5, 3, 2],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
rule = pd.Series({"A": (1, 2), "B": (1, 2), "Class": "banana"})
dists = []
min_max = pd.DataFrame({"A": {"min": 1, "max": 5}, "B": {"min": 1, "max": 11}})
# Only keep rows with the same class label as the rule
df = df.loc[df[class_col_name] == "banana"]
correct = [pd.Series({2: 0.5*0.5, 3: 0.0, 4: 0.125*0.125, 5: 1/16*1/16}),
pd.Series({2: 0, 3: 0.05 * 0.05, 4: 0.1*0.1, 5: 0.0})]
for i, col_name in enumerate(df):
if col_name == class_col_name:
continue
col = df[col_name]
if is_numeric_dtype(col):
dist = di(col, rule, min_max)
dists.append(dist)
self.assertTrue(dists[i].equals(correct[i]))
def test_di_multiple_features_multiple_rules(self):
"""Tests that correct distance is computed for 2 numeric features"""
df = pd.DataFrame({"A": [1, 1, 4, 1.5, 0.5, 0.75], "B": [3, 2, 1, .5, 3, 2],
"C": ["a", "b", "c", "d", "e", "f"],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
rules = [pd.Series({"A": (1, 2), "B": (1, 2), "Class": "banana"}),
pd.Series({"A": (1, 2), "B": (1, 2), "Class": "banana"})]
dists = []
min_max = pd.DataFrame({"A": {"min": 1, "max": 5}, "B": {"min": 1, "max": 11}})
# Only keep rows with the same class label as the rule
df = df.loc[df[class_col_name] == "banana"]
correct = [pd.Series({2: 0.5*0.5, 3: 0.0, 4: 0.125*0.125, 5: 1/16*1/16}),
| pd.Series({2: 0, 3: 0.05 * 0.05, 4: 0.1*0.1, 5: 0.0}) | pandas.Series |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import config_local
import logging
_logger = logging.getLogger(__name__)
class PrepareData:
def __init__(self):
path_str = './data/' + config_local.data_clean['data_file_name'] + '.csv'
self.ds = pd.read_csv(path_str)
def clean(self):
# self.ds = self.ds.dropna(how='all')
self.add()
def add(self):
self.ds['incident_month'] = | pd.to_datetime(self.ds['incident_date']) | pandas.to_datetime |
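# Illustrative aside: with 'incident_date' parsed by pd.to_datetime as above, the .dt accessor
# is the usual way to derive calendar fields such as a month column. Toy example with
# hypothetical data:
import pandas as pd
_demo = pd.DataFrame({'incident_date': ['2019-01-15', '2019-02-03']})
_demo['incident_month'] = pd.to_datetime(_demo['incident_date']).dt.month
print(_demo)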
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('siteid')['siteid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('siteid')['siteid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('siteid')['siteid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('siteid')['siteid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('siteid')['siteid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('siteid')['siteid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('siteid')['siteid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('siteid')['siteid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('siteid')['siteid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('siteid')['siteid'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 = | pd.merge(df5, df2016, on='siteid', how='outer') | pandas.merge |
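# Equivalent, more compact form of the year-by-year merge chain above (illustrative; behaviour
# matches the explicit df1..df6 chain as long as the frame order is preserved). Relies on the
# pandas import at the top of this script.
from functools import reduce

def merge_on_siteid(frames):
    return reduce(lambda left, right: pd.merge(left, right, on='siteid', how='outer'), frames)

# usage: merge_on_siteid([df2010, df2011, df2012, df2013, df2014,
#                         df2015, df2016, df2017, df2018, df2019])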
# coding: utf-8
# In[ ]:
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from scipy import sparse
import xgboost as xgb
import lightgbm as lgb
import cPickle
import time
import datetime
import math
import os
from multiprocessing import cpu_count
import gc
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
# Constants define
ROOT_PATH = '/home/kaholiu/xiaoxy/2018-Tencent-Lookalike/'
ONLINE = 0
# In[ ]:
target = 'label'
train_len = 45539700 # 8798814
test1_len = 11729073 # 2265989
test2_len = 11727304 # 2265879
positive_num = 2182403 # 421961
# In[ ]:
########################################### Helper function ###########################################
# In[ ]:
def log(info):
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' ' + str(info))
# In[ ]:
def merge_count(df, columns_groupby, new_column_name, type='uint64'):
add = pd.DataFrame(df.groupby(columns_groupby).size()).reset_index()
add.columns = columns_groupby + [new_column_name]
df = df.merge(add, on=columns_groupby, how="left"); del add; gc.collect()
df[new_column_name] = df[new_column_name].astype(type)
return df
# In[ ]:
def preprocess_word(texts):
pre_texts = []
for text in texts:
words = text.split()
pre_words = []
for word in words:
pre_words.append('W' + word)
pre_text = ' '.join(pre_words)
pre_texts.append(pre_text)
return pre_texts
# In[ ]:
def down_sample(df, df_feat):
df_majority = df_feat[df[target]==0]
df_minority = df_feat[df[target]==1]
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=positive_num*3, # to match minority class
random_state=7) # reproducible results
df_downsampled = | pd.concat([df_majority_downsampled, df_minority]) | pandas.concat |
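# Typical follow-up to the down-sampling above (a sketch, not from the original script):
# shuffle the rebalanced frame; keeping the original index means the labels can still be
# realigned from the full df afterwards.
df_downsampled = df_downsampled.sample(frac=1, random_state=7)
y_downsampled = df[target].loc[df_downsampled.index]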
from numbers import Number
from collections import Iterable
import re
import pandas as pd
from pandas.io.stata import StataReader
import numpy as np
pd.set_option('expand_frame_repr', False)
class hhkit(object):
def __init__(self, *args, **kwargs):
# if input data frame is specified as a stata data file or text file
if len(args) > 0:
if isinstance(args[0], pd.DataFrame):
self.from_dict(args[0])
else:
compiled_pattern = re.compile(r'\.(?P<extension>.{3})$')
p = re.search(compiled_pattern,str(args[0]))
if p is not None:
if (p.group('extension').lower() == "dta"):
self.read_stata(*args, **kwargs)
elif (p.group('extension').lower() == "csv" or p.group('extension').lower() == "txt"):
self.df = pd.read_csv(*args, **kwargs)
self._initialize_variable_labels()
else:
pass
# print('Unrecognized file type: %s' % p.group('extension'))
else:
pass
print('Unrecognized file type: %s' % p.group('extension'))
def _is_numeric(self, obj):
for element in obj:
try:
0+element
except TypeError:
return False
return True
def _create_key(self, dfon):
new_list = []
for mytuple in zip(*dfon):
temp_new_list_item = ''
for item in mytuple:
temp_new_list_item += str(item)
new_list += [temp_new_list_item]
return new_list
def _initialize_variable_labels(self):
# make sure variable_labels exists
try: self.variable_labels
except: self.variable_labels = {}
# make sure each column has a variable label
for var in self.df.columns.values:
# check if var is already in the list of variable labels
if var not in self.variable_labels:
self.variable_labels[var] = ''
return self.variable_labels
def _make_include_exclude_series(self, df, include, exclude):
using_excl = False
if (include is None) and (exclude is None):
# Make an array or data series same length as df with all entries true - all rows are included
include = pd.Series([True]*df.shape[0])
elif (include is not None) and (exclude is not None):
# raise an error saying that can only specify one
raise Exception("Specify either include or exclude, but not both")
elif (include is not None):
# check that dimensions and content are correct
pass
elif (exclude is not None):
# check that dimensions and content are correct
using_excl = True
include = exclude
# include = np.invert(exclude)
# Lets make sure we work with boolean include arrays/series. Convert numeric arrays to boolean
if (self._is_numeric(include)):
# Make this a boolean
include = [x!=0 for x in include]
elif (include.dtype is not np.dtype('bool')):
raise Exception('The include and exclude series or arrays must be either numeric or boolean.')
if (using_excl):
include = np.invert(include)
return include
def set_variable_labels(self, varlabeldict={}):
self._initialize_variable_labels()
for var in varlabeldict:
self.variable_labels[var] = varlabeldict[var]
return self.variable_labels
# Here is a 'count' method for calculating household size
def egen(self, operation, groupby, column, obj=None, column_label='', include=None, exclude=None, varlabel='',
replacenanwith=None):
if obj is None:
df = self.df
else:
df=obj.df
include = self._make_include_exclude_series(df, include, exclude)
if column_label == '':
column_label = '('+operation+') '+column+' by '+groupby
result = df[include].groupby(groupby)[column].agg([operation])
result.rename(columns={operation:column_label}, inplace=True)
merged = pd.merge(df, result, left_on=groupby, right_index=True, how='left')
if replacenanwith is not None:
merged[column_label][merged[column_label].isnull()]=replacenanwith
self.df = merged
self.set_variable_labels(varlabeldict={column_label:varlabel,})
return merged
def read_stata(self, *args, **kwargs):
reader = StataReader(*args, **kwargs)
self.df = reader.data()
self.variable_labels = reader.variable_labels()
self._initialize_variable_labels()
self.value_labels = reader.value_labels()
# self.data_label = reader.data_label()
return self.df
def sdesc(self, varlist=None, varnamewidth=20, vartypewidth=10, varlabelwidth=70, borderwidthinchars=100):
if varlist is None:
list_of_vars = self.df.columns.values
else:
list_of_vars = varlist
print('-'*borderwidthinchars)
print('obs: %d' % self.df.shape[0])
print('vars: %d' % len(list_of_vars))
print('-'*borderwidthinchars)
# print('--------'.ljust(varnamewidth), '---------'.ljust(vartypewidth), ' ', '--------------'.ljust(varlabelwidth), end='\n')
print('Variable'.ljust(varnamewidth), 'Data Type'.ljust(vartypewidth), ' ', 'Variable Label'.ljust(varlabelwidth), end='\n')
print('-'*borderwidthinchars)
# print('--------'.ljust(varnamewidth), '---------'.ljust(vartypewidth), ' ', '--------------'.ljust(varlabelwidth), end='\n')
for x in list_of_vars:
print(repr(x).ljust(varnamewidth), str(self.df[x].dtype).ljust(vartypewidth), ' ', self.variable_labels[x].ljust(varlabelwidth), end='\n')
return True
def from_dict(self, *args, **kwargs):
self.df = pd.DataFrame(*args, **kwargs)
self.variable_labels = {}
self.value_labels = {}
return self.df
def statamerge(self, obj, on, how='outer', mergevarname='_m', replacelabels=True):
df_using_right = obj.df
# create a unique key based on the 'on' list
dfon_left_master = [self.df[x] for x in on]
dfon_left_master2 = []
for dfx in dfon_left_master:
if dfx.dtype is not np.dtype('object'): # We want to allow string keys
dfon_left_master2 += [dfx.astype(float)] # We want 1 and 1.0 to be considered equal when converted
# to a string, so make them 1.0 and 1.0 respectively
else:
dfon_left_master2 += [dfx]
dfon_right_using = [df_using_right[x] for x in on]
dfon_right_using2 = []
for dfx in dfon_right_using:
if dfx.dtype is not np.dtype('object'):
dfon_right_using2 += [dfx.astype(float)]
else:
dfon_right_using2 += [dfx]
left_master_on_key = self._create_key(dfon_left_master2)
right_using_on_key = self._create_key(dfon_right_using2)
# create a new column in each dataset with the combined key
self.df['_left_merge_key'] = pd.Series(left_master_on_key)
df_using_right['_right_merge_key'] = pd.Series(right_using_on_key)
self.df = pd.merge(self.df, df_using_right, on=on, how=how)
self.df[mergevarname] = 0
self.df[mergevarname][self.df['_left_merge_key'].isnull()] = 2
self.df[mergevarname][self.df['_right_merge_key'].isnull()] = 1
self.df[mergevarname][self.df[mergevarname] == 0 ] = 3
del self.df['_left_merge_key']
del self.df['_right_merge_key']
# How about the variable labels?
variable_labels_to_add_to_merged_dataset_dict = {}
try:
obj.variable_labels
except:
obj.variable_labels = {}
if (replacelabels): # replace the variable lables with those in the right/using dataset
for var in obj.variable_labels:
if (not obj.variable_labels[var]==""):
variable_labels_to_add_to_merged_dataset_dict[var]=obj.variable_labels[var]
else: # don't replace the variable lables with those in the right/using dataset, just add variable labels
# for variables that are not already in the left/master dataset
for var in obj.variable_labels:
if var not in self.variable_labels:
variable_labels_to_add_to_merged_dataset_dict[var]=obj.variable_labels[var]
self.set_variable_labels(variable_labels_to_add_to_merged_dataset_dict)
self._initialize_variable_labels()
return self.df
def tab(self, columns, shownan=False, p=True, includenan=True, includenanrows=True,
includenancols=True, dropna=False, decimalplaces=5, usevarlabels=[True, True],
include=None, exclude=None, weightcolumn=None):
include = self._make_include_exclude_series(self.df, include, exclude)
df = self.df[include]
if (weightcolumn is None):
df['_w'] = pd.Series([1]*df.shape[0]) # remember to delete this column
else:
df['_w'] = df[weightcolumn] # remember to delete this column
if (isinstance(columns, str) or (isinstance(columns, Iterable) and len(columns)==1)):
# One way tabulation - tabulation of one variable
if (isinstance(columns, str)):
column = columns
else:
column = columns[0]
df['_deleteme'] = 1
if (includenan):
table = pd.crosstab(columns=df[column].astype(str), index=df['_deleteme'], dropna=dropna)
else:
table = pd.crosstab(columns=df[column], index=df['_deleteme'], dropna=dropna)
table1 = pd.DataFrame(table.sum(axis=0))
table1.index.names = [column]
table1.columns = ['count']
del df['_deleteme']
# account for weights
df['_'+column] = df[column].astype(str)
w_by_col = df.groupby('_'+column)['_w'].agg('sum')
# w_by_col.index = w_by_col.index.astype(str) #yes, this is quite likely redundant
# make sure the 'nan' is at the bottom, if it is there at all
if ('nan' in table1.index):
table1 = pd.concat([table1[table1.index != 'nan'], table1[table1.index == 'nan']])
if ('nan' in w_by_col.index):
w_by_col = pd.concat([w_by_col[w_by_col.index != 'nan'], w_by_col[w_by_col.index == 'nan']])
norm_w_by_col = w_by_col/w_by_col.sum()
sum_of_counts = table1['count'].sum()
table1['count']=norm_w_by_col*sum_of_counts
table1['percent'] = 100*table1['count']/table1['count'].sum()
# use variable labels?
if (isinstance(usevarlabels, bool)):
if (usevarlabels == True):
table1.index.name = self.variable_labels[column]
# Add a row with totals
table1.loc['total'] = [table1['count'].sum(), table1['percent'].sum()]
if (p):
print(table1)
return table1
elif (isinstance(columns, Iterable)):
if (includenanrows and includenancols):
table = pd.crosstab(df[columns[0]].astype(str), df[columns[1]].astype(str), dropna=dropna)
elif (includenanrows and not includenancols):
table = pd.crosstab(df[columns[0]].astype(str), df[columns[1]], dropna=dropna)
elif (not includenanrows and includenancols):
table = pd.crosstab(df[columns[0]], df[columns[1]].astype(str), dropna=dropna)
else:
table = | pd.crosstab(df[columns[0]], df[columns[1]], dropna=dropna) | pandas.crosstab |
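# Cross-check sketch (not part of hhkit): a weighted one-way tabulation can also be computed
# directly with a groupby on the stringified column. Note that hhkit's tab() additionally
# rescales the weighted proportions to the raw row count, so absolute counts may differ by a
# constant factor while the percentages agree.
def weighted_tab(df, column, weightcolumn=None):
    w = df[weightcolumn] if weightcolumn is not None else pd.Series(1.0, index=df.index)
    counts = w.groupby(df[column].astype(str)).sum()
    out = pd.DataFrame({'count': counts,
                        'percent': 100 * counts / counts.sum()})
    out.loc['total'] = [out['count'].sum(), out['percent'].sum()]
    return out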
'''
ASTGCN
'''
import sys
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pandas as pd
from Param import *
from Param_ASTGCN import *
from scipy.sparse.linalg import eigs
from torchsummary import summary
from Utils import *
class Spatial_Attention_layer(nn.Module):
'''
compute spatial attention scores
'''
def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
super(Spatial_Attention_layer, self).__init__()
self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps).to(DEVICE))
self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps).to(DEVICE))
self.W3 = nn.Parameter(torch.FloatTensor(in_channels).to(DEVICE))
self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices, num_of_vertices).to(DEVICE))
self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices, num_of_vertices).to(DEVICE))
def forward(self, x):
'''
:param x: (batch_size, N, F_in, T)
:return: (B,N,N)
'''
lhs = torch.matmul(torch.matmul(x, self.W1), self.W2) # (b,N,F,T)(T)->(b,N,F)(F,T)->(b,N,T)
rhs = torch.matmul(self.W3, x).transpose(-1, -2) # (F)(b,N,F,T)->(b,N,T)->(b,T,N)
product = torch.matmul(lhs, rhs) # (b,N,T)(b,T,N) -> (B, N, N)
S = torch.matmul(self.Vs, torch.sigmoid(product + self.bs)) # (N,N)(B, N, N)->(B,N,N)
S_normalized = F.softmax(S, dim=1)
return S_normalized
class cheb_conv_withSAt(nn.Module):
'''
K-order chebyshev graph convolution
'''
def __init__(self, K, cheb_polynomials, in_channels, out_channels):
'''
:param K: int
:param in_channles: int, num of channels in the input sequence
:param out_channels: int, num of channels in the output sequence
'''
super(cheb_conv_withSAt, self).__init__()
self.K = K
self.cheb_polynomials = cheb_polynomials
self.in_channels = in_channels
self.out_channels = out_channels
self.DEVICE = cheb_polynomials[0].device
self.Theta = nn.ParameterList([nn.Parameter(torch.FloatTensor(in_channels, out_channels).to(self.DEVICE)) for _ in range(K)])
def forward(self, x, spatial_attention):
'''
Chebyshev graph convolution operation
:param x: (batch_size, N, F_in, T)
:return: (batch_size, N, F_out, T)
'''
batch_size, num_of_vertices, in_channels, num_of_timesteps = x.shape
outputs = []
for time_step in range(num_of_timesteps):
graph_signal = x[:, :, :, time_step] # (b, N, F_in)
output = torch.zeros(batch_size, num_of_vertices, self.out_channels).to(self.DEVICE) # (b, N, F_out)
for k in range(self.K):
T_k = self.cheb_polynomials[k] # (N,N)
T_k_with_at = T_k.mul(spatial_attention)   # (N,N)*(N,N) = (N,N); the attention is normalized column-wise (each column sums to 1)
theta_k = self.Theta[k] # (in_channel, out_channel)
rhs = T_k_with_at.permute(0, 2, 1).matmul(graph_signal)  # (N, N)(b, N, F_in) = (b, N, F_in); since this is a left multiplication, the transpose turns the column-wise normalization into row-wise (each row sums to 1) before multiplying
output = output + rhs.matmul(theta_k) # (b, N, F_in)(F_in, F_out) = (b, N, F_out)
outputs.append(output.unsqueeze(-1)) # (b, N, F_out, 1)
return F.relu(torch.cat(outputs, dim=-1)) # (b, N, F_out, T)
class Temporal_Attention_layer(nn.Module):
def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
super(Temporal_Attention_layer, self).__init__()
self.U1 = nn.Parameter(torch.FloatTensor(num_of_vertices).to(DEVICE))
self.U2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_vertices).to(DEVICE))
self.U3 = nn.Parameter(torch.FloatTensor(in_channels).to(DEVICE))
self.be = nn.Parameter(torch.FloatTensor(1, num_of_timesteps, num_of_timesteps).to(DEVICE))
self.Ve = nn.Parameter(torch.FloatTensor(num_of_timesteps, num_of_timesteps).to(DEVICE))
def forward(self, x):
'''
:param x: (batch_size, N, F_in, T)
:return: (B, T, T)
'''
_, num_of_vertices, num_of_features, num_of_timesteps = x.shape
lhs = torch.matmul(torch.matmul(x.permute(0, 3, 2, 1), self.U1), self.U2)
# x:(B, N, F_in, T) -> (B, T, F_in, N)
# (B, T, F_in, N)(N) -> (B,T,F_in)
# (B,T,F_in)(F_in,N)->(B,T,N)
rhs = torch.matmul(self.U3, x) # (F)(B,N,F,T)->(B, N, T)
product = torch.matmul(lhs, rhs) # (B,T,N)(B,N,T)->(B,T,T)
E = torch.matmul(self.Ve, torch.sigmoid(product + self.be)) # (B, T, T)
E_normalized = F.softmax(E, dim=1)
return E_normalized
class cheb_conv(nn.Module):
'''
K-order chebyshev graph convolution
'''
def __init__(self, K, cheb_polynomials, in_channels, out_channels):
'''
:param K: int
:param in_channles: int, num of channels in the input sequence
:param out_channels: int, num of channels in the output sequence
'''
super(cheb_conv, self).__init__()
self.K = K
self.cheb_polynomials = cheb_polynomials
self.in_channels = in_channels
self.out_channels = out_channels
self.DEVICE = cheb_polynomials[0].device
self.Theta = nn.ParameterList([nn.Parameter(torch.FloatTensor(in_channels, out_channels).to(self.DEVICE)) for _ in range(K)])
def forward(self, x):
'''
Chebyshev graph convolution operation
:param x: (batch_size, N, F_in, T)
:return: (batch_size, N, F_out, T)
'''
batch_size, num_of_vertices, in_channels, num_of_timesteps = x.shape
outputs = []
for time_step in range(num_of_timesteps):
graph_signal = x[:, :, :, time_step] # (b, N, F_in)
output = torch.zeros(batch_size, num_of_vertices, self.out_channels).to(self.DEVICE) # (b, N, F_out)
for k in range(self.K):
T_k = self.cheb_polynomials[k] # (N,N)
theta_k = self.Theta[k] # (in_channel, out_channel)
rhs = graph_signal.permute(0, 2, 1).matmul(T_k).permute(0, 2, 1)
output = output + rhs.matmul(theta_k)
outputs.append(output.unsqueeze(-1))
return F.relu(torch.cat(outputs, dim=-1))
class ASTGCN_block(nn.Module):
def __init__(self, DEVICE, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_of_vertices, num_of_timesteps):
super(ASTGCN_block, self).__init__()
self.TAt = Temporal_Attention_layer(DEVICE, in_channels, num_of_vertices, num_of_timesteps)
self.SAt = Spatial_Attention_layer(DEVICE, in_channels, num_of_vertices, num_of_timesteps)
self.cheb_conv_SAt = cheb_conv_withSAt(K, cheb_polynomials, in_channels, nb_chev_filter)
self.time_conv = nn.Conv2d(nb_chev_filter, nb_time_filter, kernel_size=(1, 3), stride=(1, time_strides), padding=(0, 1))
self.residual_conv = nn.Conv2d(in_channels, nb_time_filter, kernel_size=(1, 1), stride=(1, time_strides))
self.ln = nn.LayerNorm(nb_time_filter)  # LayerNorm expects the channel dimension to be the last axis
def forward(self, x):
'''
:param x: (batch_size, N, F_in, T)
:return: (batch_size, N, nb_time_filter, T)
'''
batch_size, num_of_vertices, num_of_features, num_of_timesteps = x.shape
# TAt
temporal_At = self.TAt(x) # (b, T, T)
x_TAt = torch.matmul(x.reshape(batch_size, -1, num_of_timesteps), temporal_At).reshape(batch_size, num_of_vertices, num_of_features, num_of_timesteps)
# SAt
spatial_At = self.SAt(x_TAt)
# cheb gcn
spatial_gcn = self.cheb_conv_SAt(x, spatial_At) # (b,N,F,T)
# spatial_gcn = self.cheb_conv(x)
# convolution along the time axis
time_conv_output = self.time_conv(spatial_gcn.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T); apply a (1,3) convolution kernel ->(b,F,N,T)
# residual shortcut
x_residual = self.residual_conv(x.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T); apply a (1,1) convolution kernel ->(b,F,N,T)
x_residual = self.ln(F.relu(x_residual + time_conv_output).permute(0, 3, 2, 1)).permute(0, 2, 3, 1)
# (b,F,N,T)->(b,T,N,F) -ln-> (b,T,N,F)->(b,N,F,T)
return x_residual
class ASTGCN(nn.Module):
def __init__(self, DEVICE, cheb_polynomials, nb_block= 2, in_channels =1 , K=3, nb_chev_filter = 64, nb_time_filter =64, time_strides = 2, num_for_predict = TIMESTEP_OUT, len_input = 12*(WEEK+DAY+HOUR), num_of_vertices = N_NODE):
'''
:param nb_block:
:param in_channels:
:param K:
:param nb_chev_filter:
:param nb_time_filter:
:param time_strides:
:param cheb_polynomials:
:param nb_predict_step:
'''
super(ASTGCN, self).__init__()
self.BlockList = nn.ModuleList([ASTGCN_block(DEVICE, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_of_vertices, len_input)])
self.BlockList.extend([ASTGCN_block(DEVICE, nb_time_filter, K, nb_chev_filter, nb_time_filter, 1, cheb_polynomials, num_of_vertices, len_input//time_strides) for _ in range(nb_block-1)])
self.final_conv = nn.Conv2d(int(len_input/time_strides), num_for_predict, kernel_size=(1, nb_time_filter))
self.DEVICE = DEVICE
self.to(DEVICE)
def forward(self, x):
'''
:param x: (B, N_nodes, F_in, T_in)
:return: (B, N_nodes, T_out)
'''
for block in self.BlockList:
x = block(x)
output = self.final_conv(x.permute(0, 3, 1, 2))[:, :, :, -1].permute(0, 2, 1)
# (b,N,F,T)->(b,T,N,F)-conv<1,F>->(b,c_out*T,N,1)->(b,c_out*T,N)->(b,N,T)
return output
def adj_tans(sensor_ids_file, distance_file):
with open(sensor_ids_file) as f:
sensor_ids = f.read().strip().split(',')
sensor_ids2 = pd.DataFrame({'from':sensor_ids,'to':sensor_ids})
sensor_ids2['No']=sensor_ids2.index
distance_df = pd.read_csv(distance_file,dtype={'from': 'str', 'to': 'str'})
result1 = pd.merge(distance_df,sensor_ids2,on='from',suffixes = ['','_sen'])
del result1['to_sen']
result = | pd.merge(result1,sensor_ids2,on='to') | pandas.merge |
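# Assumed helpers (typically supplied by Utils.py in ASTGCN implementations; shown here only as
# a hedged sketch, since the import above does not reveal them): the cheb_polynomials list fed
# to ASTGCN is conventionally built from the scaled Laplacian L_tilde = (2L / lambda_max) - I,
# then converted to torch tensors on DEVICE before constructing the model.
import numpy as np
from scipy.sparse.linalg import eigs

def scaled_laplacian(W):
    # W: (N, N) symmetric adjacency matrix
    D = np.diag(np.sum(W, axis=1))
    L = D - W
    lambda_max = eigs(L, k=1, which='LR')[0].real
    return (2 * L) / lambda_max - np.identity(W.shape[0])

def cheb_polynomial(L_tilde, K):
    # Chebyshev polynomials T_0 .. T_{K-1} evaluated at L_tilde
    N = L_tilde.shape[0]
    polys = [np.identity(N), L_tilde.copy()]
    for i in range(2, K):
        polys.append(2 * L_tilde @ polys[i - 1] - polys[i - 2])
    return polys[:K]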
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 14:45:58 2018
@author: yasir
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.compose import make_column_transformer
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras import optimizers
from keras.callbacks import TensorBoard
import json
from ModelQuality import Evaluator, Optimizer
def evaluateClassifier():
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu", input_dim = 11))
#classifier.add(Dropout(rate = 0.1))
classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu"))
#classifier.add(Dropout(rate = 0.1))
classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation = "sigmoid"))
classifier.compile(optimizer = "rmsprop", loss = "binary_crossentropy", metrics = ['accuracy'])
return classifier
def optimizeClassifier(optimizer):
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu", input_dim = 11))
# classifier.add(Dropout(rate = 0.1))
classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu"))
# classifier.add(Dropout(rate = 0.1))
classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation = "sigmoid"))
classifier.compile(optimizer = optimizer, loss = "binary_crossentropy", metrics = ['accuracy'])
return classifier
# Load the data
dataset = | pd.read_csv("Churn_Modelling.csv") | pandas.read_csv |
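# Hedged continuation sketch: the imports at the top of this script point to a standard
# preprocessing pipeline for the public Churn_Modelling.csv layout. The column positions and
# names below are an assumption; adjust them if your copy of the file differs.
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split

X = dataset.iloc[:, 3:13]        # CreditScore ... EstimatedSalary
y = dataset.iloc[:, 13].values   # Exited
preprocess = make_column_transformer(
    (OneHotEncoder(drop='first'), ['Geography', 'Gender']),
    remainder=StandardScaler())
X = preprocess.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)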
# -*- coding: utf-8 -*-
"""
@author: dani
stable version as per 13 March, 2019
# Once per computer, Before the first run, install heteromotility by copying the following line into the console (and hit enter).
! pip install heteromotility
# Minor error is the vertical position of the text in the boxplots, which is currently only dependent on the max value,
# rather than the range. this results in incorrect positions if min value is not (approx) 0 or if it goes into negative
I will probably not fix this, unless for publication purposes, in which case contact me (Dani) to do so
# Non-essential, but for nicer heteromotility output files that don't have unnecessary white lines
# change the file hmio.py line 90 (or thereabouts in future versions)
# from
with open(output_file_path, 'w') as out_file:
# to
with open(output_file_path, 'w',newline='') as out_file:
# make sure the correct number of white spaces at start of line is maintained
"""
#%% Set parameters
# set which script modules to run (set to True/1 or False/0)
MOD_spiderplots = 1 # create spider plots
MOD_run_HM = 1 # runs heteromotility (on files created above)
MOD_DimReduct = 1 # PCA and tSNE analysis of data produced by heteromotility
MOD_ComparisonPlots = 1 # generate plots for individual parameters of heteromotility
# input names that need to be changed or checked for each experiment
#base_dir = 'CF19' # working directory; this script should be saved in the same folder as base_dir #obsolete!
exp_folder = 'CF15' # folder with 1 subfolder per condition with the csv files to be analyzed
exp_name = '' # only *.csv starting with these letters will be analyzed (can be empty string to analyze everything)
# parameters that can be changed to reflect settings/choices of analysis
track_length = 60 # length interval of tracks to plot/analyze
pre_division_scrapping = 8 # time points before nuclear division to scrap
analysis_interv = 'random' # must be 'first' OR 'last' OR 'random'
move_thresh = 1 # for heteromotility: highest speed to check as a threshold for movement in time_moving and avg_moving_speed parameters (HM default is 10); unsure whether this is in um/frame or px/frame
speed_thresh = 0 # minimum threshold of movement speed to be included in dimension reduction (can be 0); unsure whether this is in um/frame or px/frame
rseed = 22 # random seed (to produce reproducible quasi-ramdom numbers)
# parameters relating to data visualizations
plot_size = 250 # size of spider plot
spider_type = 'condition' # must be 'condition' or 'movie' reflecting what each spiderplot represents
p_dec = 4 # number of decimals shown in p value of ANOVA and Kruskal-Wallis
# Choose features from heteromotility to be used for PCA and t-SNE
# Use Ctrl+1 to include/exclude entire lines, or use #s to remove everything behind
# make sure each line ends with a comma, and that there's a closing square bracket at the very end
DimReductParas = [#'Well/XY', 'cell_id',
'total_distance', 'net_distance', 'linearity', 'spearmanrsq', 'progressivity',
'max_speed', 'min_speed', 'avg_speed',
'MSD_slope', 'hurst_RS', 'nongauss', 'disp_var', 'disp_skew',
'rw_linearity', 'rw_netdist',
'rw_kurtosis01', 'rw_kurtosis02', 'rw_kurtosis03', 'rw_kurtosis04', 'rw_kurtosis05',
'rw_kurtosis06', 'rw_kurtosis07', 'rw_kurtosis08', 'rw_kurtosis09', 'rw_kurtosis10',
'avg_moving_speed01', 'avg_moving_speed02', 'avg_moving_speed03',
'avg_moving_speed04', 'avg_moving_speed05',
'avg_moving_speed06', 'avg_moving_speed07', 'avg_moving_speed08', 'avg_moving_speed09', 'avg_moving_speed10',
'time_moving01', 'time_moving02', 'time_moving03',
'time_moving04', 'time_moving05',
'time_moving06', 'time_moving07', 'time_moving08', 'time_moving09', 'time_moving10',
'autocorr_1', 'autocorr_2', 'autocorr_3', 'autocorr_4', 'autocorr_5',
'autocorr_6', 'autocorr_7', 'autocorr_8', 'autocorr_9', 'autocorr_10',
# 'p_rturn_9_5', 'p_rturn_9_6', 'p_rturn_10_5', 'p_rturn_10_6', 'p_rturn_11_5', 'p_rturn_11_6',
# 'mean_theta_9_5', 'min_theta_9_5', 'max_theta_9_5', 'mean_theta_9_6', 'min_theta_9_6', 'max_theta_9_6',
# 'mean_theta_10_5', 'min_theta_10_5', 'max_theta_10_5', 'mean_theta_10_6', 'min_theta_10_6', 'max_theta_10_6',
# 'mean_theta_11_5', 'min_theta_11_5', 'max_theta_11_5', 'mean_theta_11_6', 'min_theta_11_6', 'max_theta_11_6',
]
# output folder names
spider_outdir = 'X_SpiderPlots' # folder that contains spiderplots
HM_indir = 'X_HM_input' # folder that contains (x,y)-coordinates for heteromotility
HM_outdir = 'X_HM_output' # folder that contains output data from heteromotility
Quant_analys = 'X_QuantAnalysis' # folder that contains graphs for PCA, tSNE, and individual parameters
# parameters for tracking progress of script (don't really need to change)
show_spiderplots = False # turn on to visualize spiderplots in console (mainly for debugging purposes)
print_HM_command = False # turn on to see the name of command line callout for heteromotility for each file in console (mainly for debugging purposes)
print_HM_process = True # turn on to see the process of heteromotility in console
#%% default imports and parameters
import pandas as pd
#import itertools
import sys
import os
from datetime import datetime
import matplotlib.pyplot as plt
from random import seed as set_random_seed
starttime = datetime.now()
set_random_seed(rseed)
outdir_list = []
lens = []
counter = 0
#base_dir = './%s/'%base_dir
base_dir = './' #
input_dir = base_dir + 'data/raw/' + exp_folder + '/'
print(input_dir)
base_outdir = base_dir + f'tracklength{track_length}_movethresh{move_thresh}/'
spider_outdir = base_outdir + spider_outdir + '/'
HM_indir = base_outdir+HM_indir+ '/'
HM_outdir = base_outdir+HM_outdir+ '/'
Quant_analys = base_outdir+Quant_analys+ '/'
MOD_HM_input = 1 # generates files that can be read by heteromotility
if MOD_spiderplots:
outdir_list.append(spider_outdir)
if MOD_HM_input:
outdir_list.append(HM_indir)
if MOD_run_HM:
outdir_list.append(HM_outdir)
if MOD_DimReduct or MOD_ComparisonPlots:
outdir_list.append(Quant_analys)
for outdir in outdir_list:
if not os.path.exists(outdir):
os.makedirs(outdir)
input_para_set = [
base_dir , exp_folder , exp_name ,
track_length , pre_division_scrapping , analysis_interv , move_thresh , speed_thresh , rseed ,
plot_size , spider_type , p_dec,
DimReductParas,
]
log = f'''
parameters for run on {starttime}
input data:
base_dir = {base_dir}
exp_folder = {exp_folder}
exp_name = {exp_name}
analysis settings:
track_length = {track_length}
pre_division_scrapping = {pre_division_scrapping}
analysis_interv = {analysis_interv}
move_thresh = {move_thresh}
speed_thresh = {speed_thresh}
rseed = {rseed}
visualization:
plot_size = {plot_size}
spider_type = {spider_type}
p_dec = {p_dec}
heteromotility parameters to include in dimension reduction:
{[p for p in DimReductParas]}
'''
with open(base_outdir+"log.txt", "w") as text_file:
print(log, file=text_file)
#%% spiderplots and (x,y)-coordinate extraction
if MOD_spiderplots or MOD_HM_input:
SPstart = datetime.now()
print ('starting on spiderplot creation and/or (x,y)-data extraction')
import csv
from random import randint
FOV = (-plot_size,plot_size)
# loop through subdirectories in input dir (excluding output directories)
dirlist = [d for d in os.listdir(input_dir) if (os.path.isdir(input_dir+d) and d not in outdir_list)]
for i,d in enumerate (dirlist):
outfile_x = HM_indir+d+'_x.csv'
outfile_y = HM_indir+d+'_y.csv'
# read input files
file_array = [f for f in os.listdir(input_dir+d) if (f.startswith(exp_name) and f.endswith('.csv'))]
xDict = {}
yDict = {}
for filename in file_array:
spider_df = pd.read_csv(input_dir+d +'/'+ filename, usecols = ['Track ID', 'Position T', 'Position X ', 'Position Y', 'Frame']).rename(columns=lambda x: x.strip())
spider_df.columns = spider_df.columns.str.replace(" ", "_")
spider_df.columns = map(str.upper, spider_df.columns)
# create list of unique track numbers and initialize lists/dictionaries for data storage
tracklist=spider_df.TRACK_ID.unique().tolist()
LenList = []
# find data per track
for track in tracklist:
trackdf = spider_df.loc[spider_df.TRACK_ID == track].reset_index(drop=True)
#scrap split tracks (dividing cells) and X frames preceding division (X = pre_division_scrapping)
if len(trackdf) != len(trackdf.FRAME.unique()):
framelist = list(trackdf.FRAME)
div_point = min([fr for fr in framelist if framelist.count(fr) > 1])-trackdf.FRAME[0]
analysis_end = max(0,div_point - pre_division_scrapping)
trackdf = trackdf[:analysis_end]
if len(trackdf) != len(trackdf.FRAME.unique()):
print('duplicate timepoints still exist at timepoint %i, track %i, file: %s'%(div_point,track,filename))
sys.exit('fix duplicate track error')
# add total length of tracks to list of lengths
lens.append(len(trackdf))
# create length lists per track
if len(trackdf) > track_length:
t0 = randint(0, len(trackdf) - track_length)
# create x- and y- coordinates for each position in each track.
if analysis_interv == 'random':
x_coord,y_coord = trackdf.POSITION_X[t0:t0+track_length], trackdf.POSITION_Y[t0:t0+track_length]
elif analysis_interv == 'last':
x_coord,y_coord = trackdf.POSITION_X[-track_length:], trackdf.POSITION_Y[-track_length:]
elif analysis_interv == 'first':
x_coord,y_coord = trackdf.POSITION_X[:track_length], trackdf.POSITION_Y[:track_length]
x_coord,y_coord = x_coord - list(x_coord)[0], y_coord - list(y_coord)[0]
# 0 total displacement in either x or y crashes heteromotility
if x_coord.max()-x_coord.min()>0 and y_coord.max()-y_coord.min() > 0:
counter +=1
# print (counter, d, filename, track)
# plot track onto spider plot
if MOD_spiderplots:
plt.plot(x_coord,y_coord, linewidth=0.5,alpha=0.5)
# write files into heteromotility output files
if MOD_HM_input:
# wx.writerow([*round(x_coord,3)])
# wy.writerow([*round(y_coord,3)])
xDict[counter] = x_coord
yDict[counter] = y_coord
# plot per file (= per movie)
if spider_type == 'movie' or spider_type == 'file':
plt.title(filename)
plt.xlim(FOV)
plt.ylim(FOV)
plt.savefig(spider_outdir+'spider_'+filename+'.png', bbox_inches='tight',dpi=300)
if show_spiderplots:
plt.show()
plt.close()
# plot per folder (= per condition)
if spider_type == 'condition' or spider_type == 'folder':
plt.title(d)
plt.xlim(FOV)
plt.ylim(FOV)
plt.savefig(spider_outdir+'spider_'+d+'.png', bbox_inches='tight',dpi=300)
if show_spiderplots:
plt.show()
plt.close()
if MOD_HM_input:
with open(outfile_x, 'w',newline='') as outfile_x, open(outfile_y, 'w',newline='') as outfile_y:
wx,wy = csv.writer(outfile_x),csv.writer(outfile_y)
for xkey,xvalue in xDict.items():
wx.writerow([*round(xvalue,3)])
for ykey,yvalue in yDict.items():
wy.writerow([*round(yvalue,3)])
SPfinish = datetime.now()
SPtime = int(round((SPfinish - SPstart).total_seconds(),0))
print ('spiderplots and/or (x,y)-data extraction done after %i seconds'%SPtime)
print ('')
#%% run heteromotility
if MOD_run_HM:
HMstart = datetime.now()
print ('starting to run heteromotility')
import subprocess as sp
file_array = os.listdir(HM_indir)
outdir_files = os.listdir(HM_outdir)
skip_count = 0
for name in file_array:
if 1==0: # replace by line below if I want to skip files already processed
# if 'HMout_'+name[:-6]+'.csv' in outdir_files:
# print ('already processed: '+name)
q=0
elif name.endswith('_x.csv'):
x_file = os.path.join(os.path.abspath(HM_indir), name)
y_file = x_file.replace('_x.csv','_y.csv')
if os.path.exists(y_file):
base_name = name[:-6]
# will print the status and commandline
if print_HM_command:
print ( 'heteromotility.exe',os.path.abspath(HM_outdir),'--tracksX',x_file,'--tracksY',y_file,'--output_suffix',base_name,'--move_thresh',str(move_thresh))
# run heteromotility as if in BASH
print ('running heteromotility on ' + base_name)
sp.call(['heteromotility.exe',os.path.abspath(HM_outdir),'--tracksX',x_file,'--tracksY',y_file,'--output_suffix',base_name,'--move_thresh',str(move_thresh)])
elif print_HM_process:
print (y_file + " doesn't exist")
print('--- skipped')
skip_count+=1
HMfinish = datetime.now()
HMtime = int(round((HMfinish - HMstart).total_seconds(),0))
print ('heteromotility processing finished after %i seconds'%HMtime)
print ('')
#%% run dimension reduction
if MOD_DimReduct:
DRstart = datetime.now()
print ('starting on dimension reductions')
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# from scipy.stats import mannwhitneyu, mstats
import matplotlib
import csv
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
colors=['xkcd:azure','xkcd:darkblue','xkcd:cyan',
'xkcd:sienna','brown',
# 'xkcd:orange','xkcd:red'
]
#colors=[0,1,2,3,4]
HM_files = os.listdir(HM_outdir)
# Read in the csv in pandas, adds a label column = filename, drops all cells slower than speed_thresh
samples = []
for filename in HM_files:
HM_df = pd.read_csv(HM_outdir + filename , usecols = DimReductParas)
HM_df['label'] = filename
samples.append(HM_df[HM_df.avg_speed>speed_thresh])
with open(HM_outdir + filename) as csvFile:
reader = csv.reader(csvFile)
DimReductParas = next(reader)[1:]
# All samples will be put together in df and label column is dropped from df
DRdf = pd.concat(samples, ignore_index = True)
label_df = DRdf.label
DRdf = DRdf.drop(['label'], axis = 1)
# Data is normalized/scaled to ensure equal contribution from all features
# normalized_df = (DRdf - DRdf.min()) / (DRdf.max() - DRdf.min()) #sascha's method, depends heavily on outliers!
normalized_df = (DRdf - DRdf.mean()) / DRdf.std()
# Create a PCA object from sklearn.decomposition
pca = PCA()
# Perform PCA on the normalized data and return transformed principal components
transformed = pca.fit_transform(normalized_df.values)
components = pca.components_
normed_comp = abs(components)/np.sum(abs(components),axis = 0)
# Calculate variance contribution of each principal component (currently unused)
expl_var_ratio = pca.explained_variance_ratio_
# Create a scatter plot of the first two principal components
w, h = plt.figaspect(1.)
pca_fig, pca_ax =plt.subplots(figsize=(w,h))
for x,i in enumerate(HM_files):
pca_ax.scatter(transformed[:,0][label_df == i], transformed[:,1][label_df == i],
label = i[6:-4], alpha=0.5, s=5, )
#, c=colors[x])
# Format PCA graph
pca_ax.legend(#loc='upper center', bbox_to_anchor=(0.5, 1.05),
# ncol=math.ceil(len(HM_files)/2),
fancybox=True, shadow=False, prop={'size': 6},
framealpha=0.75)
pca_ax.spines['right'].set_visible(False)
pca_ax.spines['top'].set_visible(False)
pca_ax.set_xlabel('PC1 (variance ' + str(int(expl_var_ratio[0]*100))+ ' %)')
pca_ax.set_ylabel('PC2 (variance ' + str(int(expl_var_ratio[1]*100))+ ' %)')
# Save PCA plot
pca_fig.savefig(Quant_analys + '0_PCA.png', bbox_inches='tight',dpi=1200)
# pca_fig.savefig(base + '__' + thisdir + '_pca_.pdf', bbox_inches='tight')
plt.close()
DR_halfway = datetime.now()
DRtime = int(round((DR_halfway - DRstart).total_seconds(),0))
print ('PCA done after %i seconds'%DRtime)
# Create t-SNE plot without axis
tsne = TSNE(n_components = 2, init = 'pca', random_state= 0 )
tsne_points = tsne.fit_transform(normalized_df.values)
fig, ax = plt.subplots(figsize=(w,h))
ax.axis('off')
for x,i in enumerate(HM_files):
ax.scatter(tsne_points[:,0][label_df == i], tsne_points[:,1][label_df == i],
label = i[6:-4], alpha=0.5, s=5,)
# c=colors[x])
# Format tSNE graph
ax.legend(#loc='upper left', #bbox_to_anchor=(0.5, 1.05),
ncol=1, fancybox=True, shadow=False, prop={'size': 6},
framealpha=0.75)
# Save t-SNE plot in directory
fig.savefig(Quant_analys + '0_tSNE.png', bbox_inches='tight',dpi=1200)
# fig.savefig(base + '__' + thisdir + '_tsne_.pdf', bbox_inches='tight')
plt.close()
DRfinish = datetime.now()
DRtime = int(round((DRfinish - DRstart).total_seconds(),0))
print ('tSNE done after %i seconds'%DRtime)
print ('')
#%% create individual plots for each parameter in heteromotility
if MOD_ComparisonPlots:
CPstart = datetime.now()
print ('starting on boxplots')
import seaborn as sns
from scipy.stats import f_oneway as anova
from scipy.stats import kruskal as kwtest
import numpy as np
# load data and format to useful object
samples = []
HM_files = os.listdir(HM_outdir)
for HM_data in HM_files:
HM_df = | pd.read_csv(HM_outdir + HM_data) | pandas.read_csv |
import pandas as pd
import os
import dcase_util
import random
import tempfile
WORKSPACE = "/home/ccyoung/Downloads/dcase2018_task1-master"
def generate_new_meta_csv():
meta_csv_path = os.path.join(WORKSPACE, 'appendixes', 'meta.csv')
df = pd.read_csv(meta_csv_path, sep='\t')
data = df.groupby(['scene_label'])
new_df = pd.DataFrame(data=[], columns=['filename', 'scene_label', 'identifier', 'source_label'])
for k, v in data:
groups = [df for _, df in v.groupby('identifier')]
random.shuffle(groups)
v = pd.concat(groups).reset_index(drop=True)
new_df = | pd.concat([new_df, v]) | pandas.concat |
import json
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score,train_test_split
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.metrics import f1_score, classification_report, confusion_matrix, accuracy_score
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
def save_classification_report(algorithm, y_pred, y_true, labels):
with open(f"report-{algorithm}.txt", "w", encoding="utf-8") as file:
report = classification_report(y_true, y_pred, target_names=labels)
file.write(report)
def plot_confusion_matrix(cm, classes, algorithm):
df = pd.DataFrame(cm, index=classes, columns=classes)
plt.figure(figsize=(10, 9))
sns.heatmap(df, annot=True, cmap=sns.color_palette("Blues"))
plt.tight_layout()
plt.savefig(f"{algorithm}.svg")
df.to_csv(f"{algorithm}-cm.csv")
def save_cross_val(clf, algorithm, data, labels, cv=10):
cross_val = pd.Series(cross_val_score(clf, data, labels, cv=10))
cross_val.to_csv(f"{algorithm}-cross-val.csv", index=False)
if __name__ == "__main__":
f1_scores = list()
accuracies = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 21 10:24:15 2016
@author: <NAME>
In this file, you will find the main filters
and basics function for time series operations
"""
# Filters
# Every filter has to produce a filtered time serie of prices from a sequence of price relatives
"""
Documentation for all the following filters :
INPUT :
dataset : a pandas.dataframe of price relatives to filter
of shape (nb_periods , nb_shares)
params : a dict of parameters
OUTPUT :
f_dataset : the filtered price serie dataset
The resulting dataframe has the following properties :
- same shape than the original dataset,
- NAN values where prediction cannot be made (mostoften first days)
- And at row t, the prediction made in period t
- Note that you have to adjust the dataframe to perform predictive analysis
otherwise you are measuring with knowledge of future information.
Specific documentation added in each function
"""
def MA(dataset, params):
"""
Moving average
params should at least contain
window : window parameter
"""
assert "window" in params, "you should add the window parameter"
p_dataset = to_absolute(dataset)
f_dataset = p_dataset.rolling(window = params["window"]).mean()
return f_dataset
def EMA(dataset, params):
"""
Exponential moving average
params should at least contain
com : is the center of mass parameter
"""
assert "com" in params, "you should add the com (center of mass) parameter"
p_dataset = to_absolute(dataset)
f_dataset = p_dataset.ewm(com=params["com"]).mean()
return f_dataset
def ZLEMA(dataset, params):
"""cf filters"""
# p_dataset = to_absolute(dataset)
def KCA(dataset, params = None):
"""
KCA performs Kalman filtering.
It is an online implementation of the Kalman filter.
We fix the seed parameter to 1 since it does not significantly affect the way the filter behaves in our case.
"""
p_dataset = to_absolute(dataset)
f_dataset = pd.DataFrame()
for stock in p_dataset.columns :
f_dataset[stock] = kalman_filtering(p_dataset[stock])
return f_dataset
from pykalman import KalmanFilter
def kalman_filtering(price_sequence):
h = 1 #time step
A = np.array([[1,h,.5*h**2],
[0,1,h],
[0,0,1]])
Q = np.eye(A.shape[0])
#2) Apply the filter
kf = KalmanFilter(transition_matrices = A , transition_covariance = Q)
means, covariances = kf.filter([price_sequence[0]])
filtered_price_sequence = [means[0,0]]
for i in range(1,len(price_sequence)):
#to track it (streaming)
new_mean, new_covariance = kf.filter_update(means[-1], covariances[-1], price_sequence[i])
means = np.vstack([means,new_mean])
covariances = np.vstack([covariances,new_covariance.reshape((1,3,3))])
filtered_price_sequence.append(means[i,0])
return filtered_price_sequence
# Predictive analysis
def adjust_data(dataset, prediction ,horizon = 1):
"""
Adjusts the prediction and the real price relatives so that performance can be measured;
you can adjust the horizon.
"""
assert dataset.shape == prediction.shape
adjusted_prediction = prediction[:-horizon].dropna(axis=0, how='all', inplace=False)
starting_idx = adjusted_prediction.index[0]
adjusted_dataset = dataset[starting_idx+horizon:]
assert adjusted_dataset.shape == adjusted_prediction.shape
return adjusted_dataset, adjusted_prediction
from sklearn.metrics import mean_absolute_error, r2_score
import numpy as np
import pandas as pd
def regression_report(adjusted_dataset, adjusted_prediction, output="all"):
"""
Build a regression task report for the adjusted datasets.
The report includes:
MAE : mean absolute error
R2 : r2-score
DPA : direction prediction accuracy
if output = "all" then it outputs the report stock per stock and the average
if output = "average" then it outputs the average only
"""
df = | pd.DataFrame() | pandas.DataFrame |
import re
import tempfile
from dataclasses import fields
from urllib.error import HTTPError
import click
import pandas as pd
from loguru import logger
from ..etl import collections
from ..etl.core import get_etl_sources
from .scrape import downloaded_pdf, extract_pdf_urls, get_driver
from .utils import RichClickCommand
def extract_parameters(s):
"""Extract year/quarter/month from a string."""
# The patterns to try to match
patterns = [
"FY(?P<fiscal_year>[0-9]{2})[_-]Q(?P<quarter>[1234])", # FYXX-QX
"FY(?P<fiscal_year>[0-9]{2})", # FYXX
"(?P<year>[0-9]{4})[_-](?P<month>[0-9]{2})", # YYYY-MM
]
for pattern in patterns:
match = re.match(pattern, s)
if match:
d = match.groupdict()
if "fiscal_year" in d:
d["fiscal_year"] = "20" + d["fiscal_year"]
return {k: int(v) for k, v in d.items()}
return None
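# Illustrative (hypothetical) inputs and outputs for the patterns above; these
# examples are not taken from the original source:
#   extract_parameters("FY21-Q3_report") -> {"fiscal_year": 2021, "quarter": 3}
#   extract_parameters("FY21_budget")    -> {"fiscal_year": 2021}
#   extract_parameters("2021-07_cash")   -> {"year": 2021, "month": 7}
#   extract_parameters("no_date_here")   -> None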
def run_etl(
cls,
dry_run=False,
no_validate=False,
extract_only=False,
fiscal_year=None,
quarter=None,
year=None,
month=None,
**kwargs,
):
"""Internal function to run ETL on fiscal year data."""
# Loop over the PDF files
finished_params = []
for f in cls.get_pdf_files():
# Filter by fiscal year
if fiscal_year is not None:
pattern = f"FY{str(fiscal_year)[2:]}"
if pattern not in f.stem:
continue
# Filter by quarter
if quarter is not None:
pattern = f"Q{quarter}"
if pattern not in f.stem:
continue
# Filter by year
if year is not None:
pattern = f"{year}"
if pattern not in f.stem:
continue
# Filter by month
if month is not None:
pattern = f"{month:02d}"
if pattern not in f.stem:
continue
# Extract parameters
params = extract_parameters(f.stem)
if params is None:
raise ValueError(f"Could not extract parameters from {f.stem}")
# ETL
if not dry_run:
report = None
all_params = {**params, **kwargs}
try:
report = cls(**all_params)
except FileNotFoundError:
pass
all_params_tup = tuple(all_params.items())
# Run the ETL pipeline
if report and all_params_tup not in finished_params:
# Log it
finished_params.append(all_params_tup)
s = ", ".join(f"{k}={v}" for k, v in all_params.items())
logger.info(f"Processing: {s}")
if not extract_only:
report.extract_transform_load(validate=(not no_validate))
else:
report.extract()
def get_etl_function(source, etl):
"""Create and return an the ETL function for the given source."""
options = {
"fiscal_year": "Fiscal year",
"quarter": "Fiscal quarter",
"kind": "Either 'adopted' or 'proposed'",
"year": "Calendar year",
"month": "Calendar month",
}
types = {"kind": click.Choice(["adopted", "proposed"])}
required = ["kind"]
@etl.command(
cls=RichClickCommand,
name=source.__name__,
help=source.__doc__,
)
@click.option("--dry-run", is_flag=True, help="Do not save any new files.")
@click.option("--no-validate", is_flag=True, help="Do not validate the data.")
@click.option(
"--extract-only",
is_flag=True,
help="Only extract the data (do not transform/load).",
)
def etl_source(dry_run, no_validate, extract_only, **kwargs):
# Run the ETL
logger.info(f"Running ETL pipeline for {source.__name__}")
run_etl(
source,
dry_run=dry_run,
no_validate=no_validate,
extract_only=extract_only,
**kwargs,
)
# Add
for field in fields(source):
opt = click.Option(
["--" + field.name.replace("_", "-")],
type=types.get(field.name, int),
help=options[field.name] + ".",
required=field.name in required,
)
etl_source.params.insert(0, opt)
return etl_source
def generate_etl_commands(etl):
"""Generate the ETL commands."""
@etl.command(cls=RichClickCommand, name="CashReport")
@click.option("--fiscal-year", help="Fiscal year.", type=int)
@click.option("--quarter", help="Fiscal quarter.", type=int)
@click.option("--dry-run", is_flag=True, help="Do not save any new files.")
@click.option("--no-validate", is_flag=True, help="Do not validate the data.")
@click.option(
"--extract-only",
is_flag=True,
help="Only extract the data (do not transform/load).",
)
def CashReport(dry_run, no_validate, extract_only, fiscal_year, quarter):
"Run ETL on all Cash Report sources from the QCMR."
# Run the ETL for Cash Report
for source in get_etl_sources()["qcmr"]:
name = source.__name__
if name.startswith("CashReport"):
logger.info(f"Running ETL pipeline for {name}")
run_etl(
source,
dry_run,
no_validate,
extract_only,
fiscal_year=fiscal_year,
quarter=quarter,
)
# Names of the groups
groups = {
"qcmr": "QCMR",
"collections": "Collections",
"spending": "Spending",
}
out = []
# Loop over each group
for group, sources in get_etl_sources().items():
# Track the command names
commands = []
# Add each source
for source in sources:
# Get the etl function for this source
etl_function = get_etl_function(source, etl)
etl.add_command(etl_function)
commands.append(source.__name__)
# add CashReport to QCMR
if group == "qcmr":
etl.add_command(CashReport, name="CashReport")
commands.append("CashReport")
# Add the help group
out.append(
{
"name": groups[group],
"commands": sorted(commands),
}
)
return out
def _get_latest_raw_pdf(cls):
"""Given an ETL class, return the latest PDF in the data directory."""
# Get PDF paths for the raw data files
dirname = cls.get_data_directory("raw")
pdf_files = dirname.glob("*.pdf")
# Get the latest
latest = sorted(pdf_files)[-1]
year, month = map(int, latest.stem.split("_"))
return year, month
def _run_monthly_update(month, year, url, css_identifier, *etls):
"""
Internal function to run update on monthly PDFs.
Parameters
----------
month : int
the latest month that we have data for
year : int
the latest calendar year we have data for
url : str
the url to check
css_identifier : str
the element identifer to scrape
etls : list
the ETL classes to run
"""
# Try to extract out the PDF links from the page
try:
pdf_urls = extract_pdf_urls(url, css_identifier)
except HTTPError as err:
if err.code == 404:
logger.info(f"URL '{url}' does not exist")
return None
else:
raise
# Find out which ones are new
last_dt = pd.to_datetime(f"{month}/{year}")
new_months = [dt for dt in pdf_urls if | pd.to_datetime(dt) | pandas.to_datetime |
# # # # # # # # # # # # # # # # # # # # # # # #
# #
# Module to run real time contingencies #
# By: <NAME> and <NAME> #
# 09-08-2018 #
# Version Aplha-0. 1 #
# #
# Module inputs: #
# -> File name #
# # # # # # # # # # # # # # # # # # # # # # # #
import pandapower as pp
import pandas as pd
import json
import copy
import calendar
from time import time
import datetime
from inspyred import ec
import inspyred
import math
from random import Random
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Disconet_Asset(net,Asset_type,Asset_to_disc, Service=False):
net_lf = copy.deepcopy(net)
if Asset_type=='GEN': # Disconnect Generators
index = net_lf.sgen.loc[net_lf.sgen['name'] == Asset_to_disc].index[0]
net_lf.sgen.in_service[index] = Service
elif Asset_type=='TR': # Disconnect Transformers
index = net_lf.trafo.loc[net_lf.trafo['name'] == Asset_to_disc].index[0]
net_lf.trafo.in_service[index] = Service
elif Asset_type=='LN': # Disconnect Lines
index = net_lf.line.loc[net_lf.line['name'] == Asset_to_disc].index[0]
net_lf.line.in_service[index] = Service
elif Asset_type=='SW':
index = net_lf.switch.loc[net.switch['name'] == Asset_to_disc].index[0]
net_lf.switch.closed[index] = not Service
elif Asset_type=='LO':
index = net_lf.load.loc[net.load['name'] == Asset_to_disc].index[0]
net_lf.load.in_service[index] = Service
elif Asset_type=='BUS':
index = net_lf.bus.loc[net.bus['name'] == Asset_to_disc].index[0]
net_lf.bus.in_service[index] = Service
elif Asset_type=='ST':
index = net_lf.storage.loc[net.storage['name'] == Asset_to_disc].index[0]
net_lf.storage.in_service[index] = Service
else:
print('Asset to disconnect does not exist')
return net_lf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Network_Reconfiguration(net,strategy):
net_lf = copy.deepcopy(net)
for step in strategy:
l_sequence = strategy[step]
asset_type = l_sequence['Element_Type']
asset_to_disc = l_sequence['Element_Name']
net_lf = Disconet_Asset(net_lf,asset_type,asset_to_disc)
return net_lf
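# Illustrative (assumed) shape of a reconfiguration strategy, inferred from the
# keys used above ('Element_Type', 'Element_Name'); the real JSON files may carry
# additional fields such as dates or priorities:
#   strategy = {
#       "1": {"Element_Type": "SW", "Element_Name": "SW_12"},
#       "2": {"Element_Type": "LN", "Element_Name": "Line_7"},
#   }
#   net_reconfigured = Network_Reconfiguration(net, strategy)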
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_Contingency_Strategies(File):
with open(File) as json_file:
data = json.load(json_file)
return data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_AM_Plan(File):
data = Load_Contingency_Strategies(File)
#with open(File) as json_file:
# data = json.load(json_file)
df = pd.DataFrame.from_dict(data, orient='index')
df['Date'] = pd.to_datetime(df['Date'])
return df
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Function to return the daily load growth
def Load_Growth_By_Day(L_growth):
daily_growth = pow(1+L_growth, 1/365)-1 # Daily growth rate
def f_Load_Daily_Growth(ndays): # Daily growth rate function
return pow(1+daily_growth,ndays)
return f_Load_Daily_Growth
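# Illustrative (hypothetical) usage, not part of the original module: with 5%
# annual load growth, compounding the daily factor over a full year recovers the
# annual rate.
#   f_growth = Load_Growth_By_Day(0.05)
#   f_growth(365)  # -> ~1.05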
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Risk assessment
def Power_Risk_assessment(net,secure=1):
assessment = {}
load = net.res_load['p_mw'].fillna(0)*secure
load_base = net.load['p_mw']*net.load.scaling
assessment['Load'] = pd.DataFrame(
{'name':net.load.name,
'ENS':load_base - load,
'ES': load})
assessment['T_ES'] = load.sum()
assessment['T_ENS'] = load_base.sum()-load.sum()
gen_name = pd.concat([net.sgen.name, net.storage.name,net.ext_grid.name], ignore_index=True)
p_gen = pd.concat([net.res_sgen.p_mw, net.res_storage.p_mw,net.res_ext_grid.p_mw], ignore_index=True)
p_gen = p_gen.fillna(0)*secure
net.res_sgen['Type'] = 'D_Gen'
net.res_storage['Type'] = 'Storage'
net.res_ext_grid['Type'] = 'External'
p_source = | pd.concat([net.res_sgen.Type, net.res_storage.Type,net.res_ext_grid.Type], ignore_index=True) | pandas.concat |
from datetime import datetime, timedelta
import time
import pandas as pd
from email.mime.text import MIMEText
from smtplib import SMTP
import pytz
from utility import run_function_till_success
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
# Importing necessary packages
import pandas as pd
import numpy as np
import datetime
import geocoder
from geopy.geocoders import Nominatim
from darksky.api import DarkSky, DarkSkyAsync
from darksky.types import languages, units, weather
# Reading monthly yellow taxi trip data for 2019
df1 = pd.read_csv("yellow_tripdata_2019-01.csv", low_memory = False)
df2 = pd.read_csv("yellow_tripdata_2019-02.csv", low_memory = False)
df3 = pd.read_csv("yellow_tripdata_2019-03.csv", low_memory = False)
df4 = pd.read_csv("yellow_tripdata_2019-04.csv", low_memory = False)
df5 = pd.read_csv("yellow_tripdata_2019-05.csv", low_memory = False)
df6 = pd.read_csv("yellow_tripdata_2019-06.csv", low_memory = False)
df7 = pd.read_csv("yellow_tripdata_2019-07.csv", low_memory = False)
df8 = pd.read_csv("yellow_tripdata_2019-08.csv", low_memory = False)
df9 = pd.read_csv("yellow_tripdata_2019-09.csv", low_memory = False)
df10 = pd.read_csv("yellow_tripdata_2019-10.csv", low_memory = False)
df11 = pd.read_csv("yellow_tripdata_2019-11.csv", low_memory = False)
df12 = pd.read_csv("yellow_tripdata_2019-12.csv", low_memory = False)
# Dropping rows with N/A's in each month
df1.dropna(inplace = True)
df2.dropna(inplace = True)
df3.dropna(inplace = True)
df4.dropna(inplace = True)
df5.dropna(inplace = True)
df6.dropna(inplace = True)
df7.dropna(inplace = True)
df8.dropna(inplace = True)
df9.dropna(inplace = True)
df10.dropna(inplace = True)
df11.dropna(inplace = True)
df12.dropna(inplace = True)
# Concatenating monthly data and removing unnecessary columns
data = | pd.concat([df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11, df12]) | pandas.concat |
import numpy as np
import pandas as pd
import os.path
import random
import collections
from bisect import bisect_right
from bisect import bisect_left
from .. import multimatch_gaze as mp
dtype = [
("onset", "<f8"),
("duration", "<f8"),
("label", "<U10"),
("start_x", "<f8"),
("start_y", "<f8"),
("end_x", "<f8"),
("end_y", "<f8"),
("amp", "<f8"),
("peak_vel", "<f8"),
("med_vel", "<f8"),
("avg_vel", "<f8"),
]
def same_sample(run=1, subj=1):
"""duplicate dataset to force exactly similar scanpaths. Choose the run
(integer between 1-8) and whether you want a lab (1) or mri (2) subject"""
if subj == 1:
sub = "sub-30"
else:
sub = "sub-10"
path = os.path.join(
"multimatch_gaze/tests/testdata",
"{}_task-movie_run-{}_events.tsv".format(sub, run),
)
loc = os.path.join(
"multimatch_gaze/tests/testdata", "locations_run-{}_events.tsv".format(run)
)
data = np.recfromcsv(
path,
delimiter="\t",
dtype={
"names": (
"onset",
"duration",
"label",
"start_x",
"start_y",
"end_x",
"end_y",
"amp",
"peak_vel",
"med_vel",
"avg_vel",
),
"formats": (
"f8",
"f8",
"U10",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
),
},
)
data2 = data
shots = pd.read_csv(loc, sep="\t")
return data, data2, shots
def short_shots(run=3):
"""create a shortened shots location annotation to test longshots()"""
loc = os.path.join(
"multimatch_gaze/tests/testdata", "locations_run-{}_events.tsv".format(run)
)
shots = pd.read_csv(loc, sep="\t")
shortshots = shots[0:20]
return shortshots
def mk_fix_vector(length=5):
"""creates a random length x 3 fixation vector in form of a record array"""
fix = np.recarray(
(0,), dtype=[("start_x", "<f8"), ("start_y", "<f8"), ("duration", "<f8")]
)
for i in range(0, length):
fixation = np.array(
(
np.random.uniform(1, 720),
np.random.uniform(1, 720),
np.random.uniform(0.01, 5),
),
dtype=[("start_x", float), ("start_y", float), ("duration", float)],
)
fix = np.append(fix, fixation)
return fix
def mk_strucarray(length=5):
"""create a random scanpath in the data format generateScanpathStructureArray
would output"""
fixation_x = random.sample(range(700), length)
fixation_y = random.sample(range(700), length)
fixation_dur = random.sample(range(5), length)
saccade_x = random.sample(range(700), length - 1)
saccade_y = random.sample(range(700), length - 1)
saccade_lenx = random.sample(range(700), length - 1)
saccade_leny = random.sample(range(700), length - 1)
saccade_rho = random.sample(range(700), length - 1)
saccade_theta = random.sample(range(4), length - 1)
eyedata = dict(
fix=dict(x=fixation_x, y=fixation_y, dur=fixation_dur,),
sac=dict(
x=saccade_x,
y=saccade_y,
lenx=saccade_lenx,
leny=saccade_leny,
theta=saccade_theta,
rho=saccade_rho,
),
)
eyedata2 = dict(
fix=dict(
x=fixation_x[::-1] * 2, y=fixation_y[::-1] * 2, dur=fixation_dur[::-1] * 2,
),
sac=dict(
x=saccade_x[::-1] * 2,
y=saccade_y[::-1] * 2,
lenx=saccade_lenx[::-1] * 2,
leny=saccade_leny[::-1] * 2,
theta=saccade_theta[::-1] * 2,
rho=saccade_rho[::-1] * 2,
),
)
return eyedata, eyedata2
def mk_angles():
"""creates vectors with predefined angular relations. angles1 and angles2
contain the following properties: 1. same 0, 2. 60 diff, 3. 90 diff,
4. 120 diff, 5. 180 diff (max. dissimilar). They are in sectors (0,1) and
(0, -1).
Angles3 and angles4 contain the same properties reversed and lie in sectors
(-1, 0) and (-1, -1)"""
angles1 = dict(sac=dict(theta=[0, 0.523, 0.785, 1.04, 1.57]))
angles2 = dict(sac=dict(theta=[0, -0.523, -0.785, -1.04, -1.57]))
angles3 = dict(sac=dict(theta=[1.57, 2.093, 2.356, 2.617, 3.14]))
angles4 = dict(sac=dict(theta=[-1.57, -2.093, -2.356, -2.617, -3.14]))
path = [0, 6, 12, 18, 24]
M_assignment = np.arange(5 * 5).reshape(5, 5)
return M_assignment, path, angles1, angles2, angles3, angles4
def mk_durs():
"""create some example duration for test_durationsim()"""
durations1 = collections.OrderedDict()
durations2 = collections.OrderedDict()
durations1 = dict(fix=dict(dur=[0.001, 20.0, 7, -18, -2.0]))
durations2 = dict(fix=dict(dur=[0.008, 18.0, 7, -11, 3.0]))
path = [0, 6, 12, 18, 24]
M_assignment = np.arange(5 * 5).reshape(5, 5)
return M_assignment, path, durations1, durations2
def mk_supershort_shots():
data = {
"onset": np.arange(0, 20),
"duration": np.repeat(1, 20),
"locale": np.repeat("somewhere", 20),
}
shots = | pd.DataFrame(data) | pandas.DataFrame |
import os
import sys
import pandas_alive
import pytest
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from PIL import Image
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, "../..")
@pytest.fixture(scope="function")
def example_dataframe():
test_data = [
[np.random.randint(0, 10000), np.random.randint(0, 10000)],
[np.random.randint(0, 10000), np.random.randint(0, 10000)],
]
test_columns = ["A", "B"]
index_start = datetime(
np.random.randint(2000, 2020),
np.random.randint(1, 12),
np.random.randint(1, 28),
)
index_end = index_start + timedelta(days=np.random.randint(1, 364))
test_index = [index_start, index_end]
return | pd.DataFrame(data=test_data, columns=test_columns, index=test_index) | pandas.DataFrame |
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right"s index, oddly
expected.index = result.index
# time column appears after left"s columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import unittest
import pandas as pd
import numpy as np
from econ_watcher_reader.reader import EconomyWatcherReader
import logging
logging.basicConfig()
logging.getLogger("econ_watcher_reader.reader").setLevel(level=logging.DEBUG)
class TestReaderCurrent(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kind_ = 'current'
# ----------------
# normal scenarios
# ----------------
def test_getting_data_for_one_month(self):
reader = EconomyWatcherReader()
data = reader.get_data(self.kind_, pd.datetime(2015,10,1), None)
# check column names
self.assertSetEqual(set(data.columns),
{'date', 'reason_type', 'industry', 'region', 'is_tokyo', 'field', 'score', 'reason_sentence'})
def test_getting_data_for_multiple_months(self):
reader = EconomyWatcherReader()
data = reader.get_data('current', pd.datetime(2018, 1, 1), pd.datetime(2018,5,1))
# check data range
self.assertListEqual(
list(pd.date_range(pd.datetime(2018, 1, 1), pd.datetime(2018,5,1), freq='MS').values),
list(np.sort(data.date.unique()))
)
def test_getting_all_available_data(self):
reader = EconomyWatcherReader()
data = reader.get_data('current')
date_in_data_str = ['{:%Y%m%d}'.format(pd.to_datetime(date_)) for date_ in data.date.unique()]
self.assertIn('{:%Y%m%d}'.format(reader.EARLIEST_MONTH), date_in_data_str)
self.assertIn('{:%Y%m%d}'.format(reader.LATEST_MONTH), date_in_data_str)
self.assertGreater(len(date_in_data_str), 2)
# --------------------
# non-normal scenarios
# --------------------
def test_raise_exception(self):
reader = EconomyWatcherReader()
# invalid `kind_` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_= 'invalid', start=pd.datetime(2018, 1, 1))
# invalid `start` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(1945,1,1))
# invalid `end` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(2100, 1, 1))
# pass `start` > `end`
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(2018, 1, 1), end=pd.datetime(2017,1,1))
class TestReaderFuture(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kind_ = 'future'
# ----------------
# normal scenarios
# ----------------
def test_getting_data_for_one_month(self):
reader = EconomyWatcherReader()
data = reader.get_data(self.kind_, pd.datetime(2015,10,1), None)
# check column names
data.to_clipboard()
self.assertSetEqual(set(data.columns),
{'date', 'industry', 'region', 'is_tokyo', 'field', 'score', 'reason_sentence'})
def test_getting_data_for_multiple_months(self):
reader = EconomyWatcherReader()
data = reader.get_data('current', pd.datetime(2018, 1, 1), pd.datetime(2018,5,1))
# check data range
self.assertListEqual(
list(pd.date_range( | pd.datetime(2018, 1, 1) | pandas.datetime |
from scapy.utils import RawPcapReader
from scapy.all import PcapReader, Packet
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, TCP
import scapy.contrib.modbus as mb
from scapy.fields import (
ConditionalField,
Emph,
)
from scapy.config import conf, _version_checker
from scapy.base_classes import BasePacket, Gen, SetGen, Packet_metaclass
import pandas as pd
import requests
import zipfile
from tqdm import tqdm
from pathlib import Path
import os
data_dir = Path("datasets")
data_files = {
"captures1_v2.zip": {
"url": "https://github.com/tjcruz-dei/ICS_PCAPS/releases/download/MODBUSTCP%231/captures1_v2.zip"
},
"captures2.zip": {
"url": "https://github.com/tjcruz-dei/ICS_PCAPS/releases/download/MODBUSTCP%231/captures2.zip"
},
"captures3.zip": {
"url": "https://github.com/tjcruz-dei/ICS_PCAPS/releases/download/MODBUSTCP%231/captures3.zip"
},
}
keep_features = ["Ethernet__type", "IP__src", "IP__dst", "IP__ihl", "IP__tos", "IP__len", "IP__id", "IP__ttl", "IP__flags",
"IP__frag",
"IP__proto", "IP__chksum", "TCP__sport", "TCP__flags",
"TCP__dport", "TCP__seq", "TCP__ack", "TCP__dataofs",
"TCP__reserved", "TCP__options", "TCP__chksum"
"TCP__window", "TCP__chksum", "ModbusADU__transId",
"ModbusADU__protoId", "ModbusADU__len", "ModbusADU__unitId", "time", "UDP__sport", "UDP__dport", "UDP__len",
"UDP__chksum", "IPv6__version", "IPv6__fl", "IPv6__plen", "IPv6__nh", "IPv6__hlim",
"IPv6 Extension Header - Hop-by-Hop Options Header__nh", "BOOTP__hlen", "BOOTP__xid", "BOOTP__secs",
"ICMPv6 Neighbor Discovery - Neighbor Solicitation__type", "ICMPv6 Neighbor Discovery - Neighbor Solicitation__cksum",
"ICMPv6 Neighbor Discovery - Router Solicitation__type", "ICMPv6 Neighbor Discovery - Router Solicitation__cksum",
"ICMP__type", "ICMP__chksum", "ICMP__id", "ICMP__seq", "DHCPv6 Solicit Message__msgtype", "DHCPv6 Solicit Message__trid",
"DHCP6 Elapsed Time Option__optcode", "DHCP6 Elapsed Time Option__optlen", "DHCP6 Elapsed Time Option__elapsedtime",
"DHCP6 Client Identifier Option__optcode", "vendor class data_ |_len", "DHCP6 Option Request Option__optcode",
"DHCP6 Option Request Option__optlen", "IP Option Router Alert_ |_option", "IP Option Router Alert_ |_length",
"Link Local Multicast Node Resolution - Query__id", "Link Local Multicast Node Resolution - Query__qdcount",
"DNS Question Record_ |_qtype", "DNS Question Record_ |_qclass",
"ICMPv6 Neighbor Discovery Option - Source Link-Layer Address__type",
"ICMPv6 Neighbor Discovery Option - Source Link-Layer Address__len",
]
def download(url, outfile, dataset_number=1):
response = requests.get(url, stream=True)
if (response.status_code != 200):
raise Exception("Error downloading data - status code: {}, url: {}".format(response.status_code, url))
total_size = int(response.headers["Content-Length"])
downloaded = 0 # keep track of size downloaded so far
chunkSize = 1024
bars = total_size // chunkSize
with open(str(outfile), "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size=chunkSize), total=bars, unit="KB",
desc="Dataset {}/3".format(dataset_number), leave=True):
f.write(chunk)
downloaded += chunkSize
def pkt_as_dict(pkt, indent=3, lvl="",
label_lvl="",
first_call=True):
# based on scapy show() method
# returns: dict of pkt attributes normally printed by show() plus time
if first_call is True:
ret = {}
else:
ret = first_call
for f in pkt.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(pkt):
continue
fvalue = pkt.getfieldval(f.name)
if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and isinstance(fvalue, list)): # noqa: E501
key = "{}_{}".format(label_lvl + lvl, f.name)
fvalue_gen = SetGen(
fvalue,
_iterpacket=0
) # type: SetGen[Packet]
for fvalue in fvalue_gen:
if key in keep_features:
ret[key] = pkt_as_dict(fvalue, indent=indent, label_lvl=label_lvl + lvl + " |",
first_call=ret) # noqa: E501
else:
key = "{}_{}_{}".format(pkt.name, label_lvl + lvl, f.name)
if isinstance(fvalue, str):
fvalue = fvalue.replace("\n", "\n" + " " * (len(label_lvl) + # noqa: E501
len(lvl) +
len(f.name) +
4))
if key in keep_features:
ret[key] = fvalue
if pkt.payload:
pkt_as_dict(pkt.payload,
indent=indent,
lvl=lvl,# + (" " * indent * pkt.show_indent),
label_lvl=label_lvl,
first_call=ret
)
ret["time"] = float(pkt.time)
return ret
def pcap_to_df(file_name, out_csv_name):
file_name = str(file_name)
print('Opening {} ...'.format(file_name))
df = []
for i, pkt in enumerate(PcapReader(file_name)):
data = pkt_as_dict(pkt)
#data["bytes"] = str(pkt)
data["packet_id"] = i + 1
df.append(data)
df = pd.DataFrame(df)
df["filename"] = file_name.split("/")[-1]
if len(file_name.split("/")) > 2:
df["attack_type"] = file_name.split("/")[-2]
df["capturename"] = file_name.split("/")[-3]
df["time_delta"] = df["time"].diff()
df.to_csv(out_csv_name, index=False)
print("done parsing {}".format(file_name))
return df
def download_and_label_data(labels, data_dir):
train_dfs = []
test_dfs = []
for folder in labels["folder"].unique():
zipfilename = data_dir / (folder + ".zip")
try:
archive = zipfile.ZipFile(zipfilename)
except (FileNotFoundError, zipfile.BadZipfile):
download(data_files[folder + ".zip"]["url"], zipfilename)
archive = zipfile.ZipFile(zipfilename)
for pcap_filename in labels[labels["folder"] == folder]["relevant_files"].unique():
pcap_headless_filepath = str(Path(*pcap_filename.parts[1:]))
csv_filename = Path(data_dir) / pcap_headless_filepath.replace("pcap", "csv").replace("/", "_")
if csv_filename.exists():
df = pd.read_csv(csv_filename)
else:
archive.extract(pcap_headless_filepath, data_dir) # remove top level of posix path (data_dir)
df = pcap_to_df(pcap_filename, out_csv_name=csv_filename)
if not "malicious" in df.columns.values:
# label_data
df["malicious"] = 0
df["attack_type"] = "clean"
attack = labels[(labels["folder"] == folder) & (labels["relevant_files"] == pcap_filename) & (labels["malicious"] == 1)]
assert len(attack) <= 1, attack # should only be one or zero attack periods for this code to work
if len(attack):
start, end = attack[["start_packet", "end_packet"]].values.flatten().astype(int)
df.loc[(df["packet_id"] >= start) & (df["packet_id"] < end), "malicious"] = 1
df.loc[df["malicious"] == 1, "attack_type"] = attack["attack"].values[0]
df.to_csv(csv_filename, index=False)
train_test = labels[(labels["folder"] == folder) & (labels["relevant_files"] == pcap_filename)]["train_test"].values[0]
if train_test == "train":
train_dfs.append(df)
else:
test_dfs.append(df)
pd.concat(train_dfs).to_csv(data_dir/"train.csv", index=False)
pd.concat(test_dfs).to_csv(data_dir/"test.csv", index=False)
def compress(zipfilename, filename):
zipObj = zipfile.ZipFile(zipfilename, 'w', compression=zipfile.ZIP_DEFLATED)
zipObj.write(filename)
zipObj.close()
def decompress(zipfilename, outfolder):
zipObj = zipfile.ZipFile(zipfilename)
zipObj.extractall(outfolder)
zipObj.close()
def reconstitute_data(training_set_file, testing_set_file):
for file in tqdm([training_set_file, testing_set_file]):
file = str(file)
decompress(file.replace(".csv", "1.zip"), ".")
decompress(file.replace(".csv", "2.zip"), ".")
pd.concat([ | pd.read_csv(file + "_1") | pandas.read_csv |
import pandas as pd
def generate_demand_csv(input_fn: str, user_data_dir: str):
# Demand
demand = pd.read_excel(input_fn, sheet_name='2.3 EUD', index_col=0, header=1, usecols=range(5))
demand.columns = [x.strip() for x in demand.columns]
demand.index = [x.strip() for x in demand.index]
# Add additional information
demand_aux = pd.read_csv(f"{user_data_dir}/aux_demand.csv", index_col=0)
demand = pd.merge(demand, demand_aux, left_index=True, right_index=True)
# Rename and reorder columns
demand.index.name = 'parameter name'
demand = demand.reset_index()
demand = demand[['Category', 'Subcategory', 'parameter name', 'HOUSEHOLDS',
'SERVICES', 'INDUSTRY', 'TRANSPORTATION', 'Units']]
demand.to_csv(f"{user_data_dir}/Demand.csv", sep=',', index=False)
def generate_resources_csv(input_fn: str, user_data_dir: str):
# Resources
resources = pd.read_excel(input_fn, sheet_name='2.1 RESOURCES', index_col=0, header=1,
usecols=range(5))
resources.index = [x.strip() for x in resources.index]
resources.columns = [x.split(" ")[0] for x in resources.columns]
# Add additional information
resources_aux = pd.read_csv(f"{user_data_dir}/aux_resources.csv", index_col=0)
resources = pd.merge(resources, resources_aux, left_index=True, right_index=True)
# Rename and reorder columns
resources.index.name = 'parameter name'
resources = resources.reset_index()
resources = resources[['Category', 'Subcategory', 'parameter name', 'avail', 'gwp_op', 'c_op', 'einv_op']]
# resources.columns = ['Category', 'Subcategory', 'parameter name', 'Availability', 'Direct and indirect emissions',
# 'Price', 'Direct emissions']
# Add a line with units
units = pd.Series(['', '', 'units', '[GWh/y]', '[ktCO2-eq./GWh]', '[Meuro/GWh]', '[GWh/y]'],
index=resources.columns)
resources = pd.concat((units.to_frame().T, resources), axis=0)
resources.to_csv(f"{user_data_dir}/Resources.csv", sep=',', index=False)
def generate_technologies_csv(input_fn: str, user_data_dir: str):
# Technologies
technologies = pd.read_excel(input_fn, sheet_name='3.2 TECH', index_col=1)
technologies = technologies.drop(technologies.columns[[0]], axis=1)
technologies.index = [x.strip() for x in technologies.index]
# Add additional information
technologies_aux = pd.read_csv(f"{user_data_dir}/aux_technologies.csv", index_col=0)
technologies = pd.merge(technologies, technologies_aux, left_index=True, right_index=True)
# Rename and reorder columns
technologies.index.name = 'parameter name'
technologies = technologies.reset_index()
technologies = technologies[['Category', 'Subcategory', 'Technologies name', 'parameter name', 'c_inv', 'c_maint',
'gwp_constr', 'einv_constr', 'lifetime', 'c_p', 'fmin_perc', 'fmax_perc',
'f_min', 'f_max']]
# Add a line with units
units = pd.Series(['', '', 'Name (simplified)', 'Name (in model and documents)',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[ktonCO2_eq/GW],[ktonCO2_eq/GWh],[ktonCO2_eq/(Mkmpass/h)],[ktonCO2_eq/(Mtonkm/h)]',
'[GWh/y]', '[years]', '[]', '[]', '[]', '[GW]', '[GW]'],
index=technologies.columns)
technologies = pd.concat((units.to_frame().T, technologies), axis=0)
technologies.to_csv(f"{user_data_dir}/Technologies.csv", sep=',', index=False)
def generate_layers_csv(input_fn: str, dev_data_dir: str):
# Layers in-out
layers = pd.read_excel(input_fn, sheet_name='3.1 layers_in_out', index_col=1)
layers = layers.drop(layers.columns[0], axis=1)
layers.columns = [x.strip() for x in layers.columns]
layers.to_csv(f"{dev_data_dir}/Layers_in_out.csv", sep=',')
def generate_storage_csv(input_fn: str, dev_data_dir: str):
# Storage eff in
storage_eff_in = pd.read_excel(input_fn, sheet_name='3.3 STO', header=2, nrows=25, index_col=0)
storage_eff_in.index = [x.strip() for x in storage_eff_in.index]
storage_eff_in.to_csv(f"{dev_data_dir}/Storage_eff_in.csv", sep=',')
# Storage eff out
storage_eff_out = pd.read_excel(input_fn, sheet_name='3.3 STO', header=30, nrows=25, index_col=0)
storage_eff_out.index = [x.strip() for x in storage_eff_out.index]
storage_eff_out.to_csv(f"{dev_data_dir}/Storage_eff_out.csv", sep=',')
# Storage characteristics
storage_c = pd.read_excel(input_fn, sheet_name='3.3 STO', header=58, nrows=25, index_col=0)
storage_c.index = [x.strip() for x in storage_c.index]
storage_c.dropna(axis=1).to_csv(f"{dev_data_dir}/Storage_characteristics.csv", sep=',')
def generate_time_series_csv(input_fn: str, dev_data_dir: str):
# Time series
time_series = pd.read_excel(input_fn, sheet_name='1.1 Time Series', index_col=0, header=1,
usecols=range(11), nrows=8761)
time_series = time_series.drop(time_series.columns[0], axis=1)
time_series = time_series.drop(time_series.index[0])
time_series.columns = ["Electricity (%_elec)", "Space Heating (%_sh)",
"Passanger mobility (%_pass)", "Freight mobility (%_freight)",
"PV", "Wind_onshore", "Wind_offshore", "Hydro_river", "Solar"]
time_series.to_csv(f"{dev_data_dir}/Time_series.csv", sep=',')
def estd_excel_to_csv(input_fn: str):
user_data_dir = "../Data/User_data"
dev_data_dir = "../Data/Developer_data"
generate_demand_csv(input_fn, user_data_dir)
generate_resources_csv(input_fn, user_data_dir)
generate_technologies_csv(input_fn, user_data_dir)
generate_layers_csv(input_fn, dev_data_dir)
generate_storage_csv(input_fn, dev_data_dir)
generate_time_series_csv(input_fn, dev_data_dir)
def step1_excel_to_csv(input_fn, dev_data_dir: str, output_dir: str):
change_name_dict = {"Electricity": "Lighting and co",
"PV": "SUN",
"Space Heating": "SH"}
# weights defined by user
user_data_weights = pd.read_excel(input_fn, sheet_name='User Define', index_col=0, header=4, nrows=5, usecols=[0, 6]).squeeze()
user_data_weights.index = [change_name_dict[c] if c in change_name_dict else c for c in user_data_weights.index]
variables = user_data_weights.index
time_series = pd.read_csv(f"{dev_data_dir}/Time_series.csv", index_col=0)#, usecols=range(121), nrows=368)
time_series.columns = [c.split(" (")[0] for c in time_series.columns]
time_series.columns = [change_name_dict[c] if c in change_name_dict else c for c in time_series.columns]
# Keep only the variables for which there is some user-defined weight
time_series = time_series[variables]
# Compute sums of all capacity factors
totals = time_series.sum().round(2)
# Compute .dat table content
updated_time_series = pd.DataFrame(0., index=range(1, 365+1), columns=range(1, 24*len(variables)+1), dtype=float)
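    # Layout sketch: rows are days 1..365; each variable occupies a block of 24 hourly columns,
    # with every cell scaled by the user-defined weight and normalised by that variable's yearly total.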
for i, variable in enumerate(variables):
for j in range(365):
for k in range(24):
updated_time_series.loc[j+1, k+1+24*i] = \
time_series.loc[j*24+k+1, variable]*user_data_weights[variable]/totals[variable]
# Add header
header = pd.DataFrame(index=['Type', 'Weights', 'Norm'], columns=range(1, 24*len(variables)+1))
for i, variable in enumerate(variables):
columns_range = range(i*24+1, (i+1)*24+1)
header.loc['Type', columns_range] = variable
header.loc['Weights', columns_range] = user_data_weights[variable]
header.loc['Norm', columns_range] = totals[variable]
updated_time_series = | pd.concat((header, updated_time_series)) | pandas.concat |
"""
Functions for building bokeh figure objects from dataframes.
"""
import datetime
import logging
import math
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
from bokeh.layouts import Column, Row, gridplot
from bokeh.models import ColumnDataSource, LabelSet, Legend, LegendItem
from bokeh.palettes import brewer, magma
from bokeh.plotting import figure
from bokeh.transform import cumsum
logger = logging.getLogger(__name__)
def create_daily_chart_figure(current_month_df: pd.DataFrame, accountid_mapping: Optional[dict] = None) -> Tuple[figure, float, float]:
"""
Create a cumulative stacked line graph of given AWS accounts.
:param current_month_df: Dataframe containing the current and previous month data
:param accountid_mapping: AccountID to display name mapping for figure labels
"""
# get max record date (assumes values will NOT decrease)
accountids = [i for i in current_month_df.columns if i.isdigit()]
# dates are same for all accounts
# - take a sample to discover the 'last_available_date'
sample_accountid = accountids[0]
account_max_value = current_month_df[[sample_accountid]].max()[0]
if np.isnan(account_max_value):
logger.error(f"No data for sample_accountid={sample_accountid}")
last_available_date = current_month_df[current_month_df[sample_accountid] == account_max_value].index.date[0]
current_cost = float(current_month_df[accountids].max().sum())
previous_cost = float(current_month_df.loc[last_available_date]["previous_month_total"])
percentage_change = round((current_cost / previous_cost - 1.0) * 100, 1)
source = ColumnDataSource(current_month_df)
today = datetime.datetime.utcnow()
previous_day = today - datetime.timedelta(days=2)
today_display_str = today.strftime("%Y-%m-%d")
f = figure(
title=f"AWS Cost ({today_display_str} UTC) ${round(current_cost, 2)} ({percentage_change}%)",
x_axis_type="datetime",
x_axis_label="Date",
y_axis_label="Cost ($)",
plot_width=800,
plot_height=350,
)
f.title.text_font_size = "14pt"
f.line("date", "previous_month_total", source=source, line_color="gray", line_dash="dashed")
def stacked(df):
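        # Build stacked-area band edges: the row-wise cumulative sum gives each band's top edge,
        # and the shifted cumsum (reversed) gives the matching bottom edge for the patch outline.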
df_top = df.cumsum(axis=1)
df_bottom = df_top.shift(axis=1)[::-1]
df_stack = | pd.concat([df_bottom, df_top], ignore_index=True) | pandas.concat |
# Copyright (c) 2016 <NAME>
import numpy as np
import pandas as pd
from sklearn import decomposition
import json
import math
import pickle
### Load data
loadPrefix = "import/input/"
# Bins 1, 2, 3 of Up are to be removed later on
dirmagUpA = np.genfromtxt(loadPrefix+"MLM_adcpU_dirmag.csv", skip_header=3, delimiter=",", comments="#", dtype=float, invalid_raise=True)
# Bin 1 of Down is to be removed later on
dirmagDownA = np.genfromtxt(loadPrefix+"MLM_adcpD_dirmag.csv", skip_header=3, delimiter=",", comments="#", dtype=float, invalid_raise=True)
openessA = np.genfromtxt(loadPrefix+"coral_frames2.csv", skip_header=2, delimiter=",", comments="#", dtype=float, invalid_raise=True)
with open(loadPrefix+"scalar_POS434-156_conservativeTemperature_215_original.json") as fp:
ctA = np.asarray(json.load(fp)["data"])
with open(loadPrefix+"scalar_POS434-156_absoluteSalinity_215_original.json") as fp:
saA = np.asarray(json.load(fp)["data"])
with open(loadPrefix+"scalar_POS434-156_potentialDensityAnomaly_215_original.json") as fp:
sigma0A = np.asarray(json.load(fp)["data"])
### Create time series date indices
dateOffset = np.datetime64("2012-06-01T00:00:01Z")
hiResIndexStart = 354185 # in [s]
hiResIndexEnd = 1343285 # in [s] --- shorter: 1342685 --- longer: 9332570
hiResIndexStep = 600 # in [s]
hiResIndex = dateOffset + np.arange(hiResIndexStart, hiResIndexEnd, hiResIndexStep).astype("timedelta64[s]")
ignoreBecauseOfLags = 7
loResIndex = dateOffset + openessA[ignoreBecauseOfLags:,0].astype("timedelta64[s]")
ctIndex = dateOffset + ctA[:,0].astype("timedelta64[s]")
saIndex = dateOffset + saA[:,0].astype("timedelta64[s]")
sigma0Index = dateOffset + sigma0A[:,0].astype("timedelta64[s]")
dirmagUpIndex = dateOffset + dirmagUpA[:,0].astype("timedelta64[s]")
dirmagDownIndex = dateOffset + dirmagDownA[:,0].astype("timedelta64[s]")
### Create original time series / data frames
ctOrig = pd.Series(ctA[:,1], index=ctIndex)
saOrig = pd.Series(saA[:,1], index=saIndex)
sigma0Orig = pd.Series(sigma0A[:,1], index=sigma0Index)
nBinsUnfilteredUp = round((dirmagUpA.shape[1]-1)/2)
dirUpOrig = pd.DataFrame(data=dirmagUpA[:,1:(1+nBinsUnfilteredUp)], index=dirmagUpIndex)
magUpOrig = pd.DataFrame(data=dirmagUpA[:,(1+nBinsUnfilteredUp):], index=dirmagUpIndex)
nBinsUnfilteredDown = round((dirmagDownA.shape[1]-1)/2)
dirDownOrig = | pd.DataFrame(data=dirmagDownA[:,1:(1+nBinsUnfilteredDown)], index=dirmagDownIndex) | pandas.DataFrame |
import numpy as np
np.random.seed(0)
import pandas as pd
import matplotlib.pyplot as plt
import gym
env = gym.make('Taxi-v3')
env.seed(0)
print('Observation space = {}'.format(env.observation_space))
print('Action space = {}'.format(env.action_space))
print('Number of states = {}'.format(env.observation_space.n))
print('Number of actions = {}'.format(env.action_space.n))
state = env.reset()
taxirow, taxicol, passloc, destidx = env.unwrapped.decode(state)
print(taxirow, taxicol, passloc, destidx)
print('Taxi position = {}'.format((taxirow, taxicol)))
print('Passenger position = {}'.format(env.unwrapped.locs[passloc]))
print('Destination position = {}'.format(env.unwrapped.locs[destidx]))
env.render()
class SARSAAgent:
def __init__(self, env, gamma=0.9, learning_rate=0.2, epsilon=.01):
self.gamma = gamma
self.learning_rate = learning_rate
self.epsilon = epsilon
self.action_n = env.action_space.n
self.q = np.zeros((env.observation_space.n, env.action_space.n))
def decide(self, state):
if np.random.uniform() > self.epsilon:
action = self.q[state].argmax()
else:
action = np.random.randint(self.action_n)
return action
def learn(self, state, action, reward, next_state, done, next_action):
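        # SARSA target: bootstrap from the action actually selected in the next state;
        # the (1 - done) factor zeroes the bootstrap term at episode termination.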
u = reward + self.gamma * \
self.q[next_state, next_action] * (1. - done)
td_error = u - self.q[state, action]
self.q[state, action] += self.learning_rate * td_error
def play_sarsa(env, agent, train=False, render=False):
episode_reward = 0
observation = env.reset()
action = agent.decide(observation)
while True:
if render:
env.render()
next_observation, reward, done, _ = env.step(action)
episode_reward += reward
        next_action = agent.decide(next_observation)  # at a terminal state this step has no effect
if train:
            agent.learn(observation, action, reward, next_observation, done, next_action)
if done:
break
observation, action = next_observation, next_action
return episode_reward
agent = SARSAAgent(env)
# Training
episodes = 3000
episode_rewards = []
for episode in range(episodes):
episode_reward = play_sarsa(env, agent, train=True)
episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Testing
agent.epsilon = 0.  # disable exploration
episode_rewards = [play_sarsa(env, agent) for _ in range(100)]
print('Average episode reward = {} / {} = {}'.format(sum(episode_rewards), len(episode_rewards), np.mean(episode_rewards)))
print( | pd.DataFrame(agent.q) | pandas.DataFrame |
import pandas as pd
import pandas.testing as pdt
import qiime2
from qiime2.plugin.testing import TestPluginBase
from q2_types.feature_data import DNAFASTAFormat
from genome_sampler.subsample_diversity import subsample_diversity
class TestSubsampleDiversity(TestPluginBase):
package = 'genome_sampler.tests'
def setUp(self):
super().setUp()
def test_subsample_diversity(self):
context_seqs1 = self.get_data_path('context-seqs-1.fasta')
context_seqs1 = DNAFASTAFormat(context_seqs1, 'r')
sel = subsample_diversity(context_seqs1,
percent_id=0.98)
exp_inclusion = pd.Series([True, True, False, False, False, True],
index=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'],
name='inclusion')
exp_metadata = pd.DataFrame(index=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
exp_metadata.index.name = 'id'
exp_metadata = qiime2.Metadata(exp_metadata)
| pdt.assert_series_equal(sel.inclusion, exp_inclusion) | pandas.testing.assert_series_equal |
from connections.mysql_connector import MySQL_Connector
from models.topic_modeling import Topic_Modeling
from connections.neo4j_connector import Neo4j_Connector
import os
from datetime import datetime
from gensim import corpora, models, similarities
from models.graph_generator import Graph_Generator
from models.tuple_extractor import Tuple_Extractor
from acessos import read, get_conn, persistir_uma_linha, persistir_multiplas_linhas, replace_df
import pandas as pd
import re
import credentials
import warnings
warnings.filterwarnings('ignore')
connector = MySQL_Connector("conn_orfeu")
conn = connector.return_conn("influencer_br")
neo4j_client = Neo4j_Connector(credentials.neo4j_uri, credentials.neo4j_user, credentials.neo4j_password)
graph_generator = Graph_Generator(neo4j_client)
tuple_extractor = Tuple_Extractor()
def gerar_tuplas(texto):
tuple_extractor.extrair_tupla(texto)
df_tuplas = tuple_extractor.get_ultimas_tuplas_geradas()
return df_tuplas
def gerar_df_doc(texto, video_id=""):
if video_id !="":
data = {'nome': [texto], 'type': ["DOC"], 'id': [video_id]}
else:
data = {'nome': [texto], 'type': ["DOC"]}
df_doc = pd.DataFrame(data=data)
return df_doc
def pre_processar_tuplas(df_tuplas):
df_tuplas['arg1'] = df_tuplas.apply(lambda x: graph_generator.pre_process_text(x['arg1']), axis=1)
df_tuplas['arg2'] = df_tuplas.apply(lambda x: graph_generator.pre_process_text(x['arg2']), axis=1)
df_tuplas['rel'] = df_tuplas.apply(lambda x: graph_generator.pre_process_text(x['rel'], rel=True), axis=1)
return df_tuplas
def get_df_sentencas(df_tuplas):
df_sentencas = | pd.DataFrame() | pandas.DataFrame |
"""Code for the bootstrap uncertainty quantification (BUQ) algorithm."""
import time
import logging
import numpy as np
import pandas as pd
import buq
import models
import tests
def import_time_series_data():
"""Import time series data for model, without any time slicing."""
ts_data = pd.read_csv('data/demand_wind.csv', index_col=0)
ts_data.index = | pd.to_datetime(ts_data.index) | pandas.to_datetime |
#!python
##################################################
# ACCESS QC Module
# Innovation Laboratory
# Center For Molecular Oncology
# Memorial Sloan Kettering Cancer Research Center
# maintainer: <NAME> (<EMAIL>)
#
#
# This module functions as an aggregation step to combine QC metrics
# across Waltz runs on different bam types.
import shutil
import logging
import argparse
import numpy as np
import pandas as pd
from python_tools.constants import *
from python_tools.util import to_csv
def unique_or_tot(x):
if TOTAL_LABEL in x:
return TOTAL_LABEL
else:
return PICARD_LABEL
def get_read_counts_table(path, pool):
"""
This method is only used to generate stats for un-collapsed bams
"""
read_counts_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts = pd.read_csv(read_counts_path, sep='\t')
# Melt our DF to get all values of the on target rate and duplicate rates as values
read_counts = pd.melt(read_counts, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
# We only want the read counts-related row values
read_counts = read_counts[~read_counts['Category'].isin(['bam', TOTAL_READS_COLUMN, UNMAPPED_READS_COLUMN, 'duplicate_fraction'])]
read_counts['method'] = read_counts['Category'].apply(unique_or_tot)
read_counts['pool'] = pool
# read_counts = read_counts.reset_index(drop=True)
return read_counts
def get_read_counts_total_table(path, pool):
"""
This table is used for "Fraction of Total Reads that Align to the Human Genome" plot
"""
full_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_total = pd.read_csv(full_path, sep='\t')
col_idx = ~read_counts_total.columns.str.contains(PICARD_LABEL)
read_counts_total = read_counts_total.iloc[:, col_idx]
read_counts_total['AlignFrac'] = read_counts_total[TOTAL_MAPPED_COLUMN] / read_counts_total[TOTAL_READS_COLUMN]
read_counts_total[TOTAL_OFF_TARGET_FRACTION_COLUMN] = 1 - read_counts_total[TOTAL_ON_TARGET_FRACTION_COLUMN]
read_counts_total['pool'] = pool
return read_counts_total
def get_coverage_table(path, pool):
"""
Coverage table
"""
full_path = os.path.join(path, AGBM_COVERAGE_FILENAME)
coverage_table = pd.read_csv(full_path, sep='\t')
coverage_table = pd.melt(coverage_table, id_vars=SAMPLE_ID_COLUMN, var_name='method', value_name='average_coverage')
coverage_table['method'] = coverage_table['method'].str.replace('average_coverage_', '')
coverage_table['pool'] = pool
return coverage_table
def get_collapsed_waltz_tables(path, method, pool):
"""
Creates read_counts, coverage, and gc_bias tables for collapsed bam metrics.
"""
read_counts_table_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_table = pd.read_csv(read_counts_table_path, sep='\t')
read_counts_table = pd.melt(read_counts_table, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
read_counts_table = read_counts_table.dropna(axis=0)
read_counts_table['method'] = [method] * len(read_counts_table)
read_counts_table['pool'] = pool
# Todo: merge with get_cov_table
coverage_table_path = '/'.join([path, AGBM_COVERAGE_FILENAME])
coverage_table = pd.read_csv(coverage_table_path, sep='\t', usecols=[0, 1], names=[SAMPLE_ID_COLUMN, 'average_coverage'], header=0)
coverage_table['method'] = [method] * len(coverage_table)
coverage_table['pool'] = pool
gc_bias_table = get_gc_table(method, WALTZ_INTERVALS_FILENAME_SUFFIX, path)
return [read_counts_table, coverage_table, gc_bias_table]
def get_gc_table(curr_method, intervals_filename_suffix, path):
"""
Function to create GC content table
"""
gc_with_cov = pd.DataFrame(columns=GC_BIAS_HEADER)
sample_files = [f for f in os.listdir(path) if intervals_filename_suffix in f]
for sample in sample_files:
filename = os.path.join(path, sample)
curr_table = pd.read_csv(filename, names=WALTZ_INTERVALS_FILE_HEADER, sep='\t')
sample = sample.split('_cl_aln_srt')[0]
newDf = curr_table[[WALTZ_INTERVAL_NAME_COLUMN, WALTZ_PEAK_COVERAGE_COLUMN, WALTZ_GC_CONTENT_COLUMN]].copy()
newDf['method'] = curr_method
newDf[SAMPLE_ID_COLUMN] = sample
gc_with_cov = pd.concat([gc_with_cov, newDf]).sort_values([SAMPLE_ID_COLUMN, WALTZ_INTERVAL_NAME_COLUMN])
return gc_with_cov
def get_bins(tbl):
"""
Create bins from min_gc value to max_gc value in increments of 0.05 (for GC content table)
"""
logging.info('GC table generation')
logging.info(tbl)
min_gc = np.min(tbl['gc'])
max_gc = np.max(tbl['gc'])
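    # Snap the lower edge down and the upper edge up to multiples of 0.05 so the observed GC range is fully covered.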
start = round(min_gc - np.mod(min_gc, 0.05), 2)
stop = round(max_gc + 0.1 - np.mod(max_gc, 0.05), 2)
all_bins = np.arange(start, stop, step=0.05)
return all_bins
def get_gc_table_average_for_each_sample(tbl):
"""
Creates the GC content table, with each sample represented
"""
tbl = tbl.copy()
# Restrict to just 0.3 --> 0.8 %GC
all_bins = np.arange(0.3, 0.85, 0.05)
tbl[GC_BIN_COLUMN] = pd.cut(tbl['gc'], all_bins)
# Create new column of normalized coverage across intervals, for each combination of sample and method
groups = [METHOD_COLUMN, SAMPLE_ID_COLUMN]
grouped = tbl.groupby(groups)['peak_coverage']
tbl['coverage_norm'] = grouped.transform(lambda x: x / x.mean())
# Upgrading to newer pandas requires us to restrict transform operations to only rows with non-NA values
tbl = tbl[~tbl[GC_BIN_COLUMN].isnull()]
# Calculate mean coverage within each GC bin, after standardizing coverage across whole sample
groups = [METHOD_COLUMN, SAMPLE_ID_COLUMN, GC_BIN_COLUMN]
grouped = tbl.groupby(groups)['coverage_norm']
tbl['coverage_norm_2'] = grouped.transform(lambda x: x.mean())
tbl = tbl[[SAMPLE_ID_COLUMN, 'coverage_norm_2', GC_BIN_COLUMN, METHOD_COLUMN]].copy()
tbl = tbl.drop_duplicates()
tbl = tbl.rename(index=str, columns={'coverage_norm_2': 'coverage'})
tbl = tbl[~tbl.isnull().any(axis=1)]
return tbl
def get_gene_and_probe(interval):
gene_interval_regex = re.compile(r'^.*_.*_.*_.*$')
# Example interval string: exon_AKT1_4a_1
if interval[0:4] == 'exon':
split = interval.split('_')
return split[1], split[2] + '_' + split[3]
# Another example I've encountered: 426_2903_324(APC)_1a
elif gene_interval_regex.match(interval):
split = interval.split('_')
return '_'.join(split[0:2]), '_'.join(split[2:4])
else:
gene, exon = interval.split('_exon_')
return gene, exon
def get_coverage_per_interval(tbl):
"""
Creates table of collapsed coverage per interval
"""
# Coverage per interval Graph comes from unfiltered Bam, Pool A Targets
unfiltered_boolv = (tbl['method'] == UNFILTERED_COLLAPSING_METHOD)
# Filter out MSI & Fingerprinting intervals
exon_boolv = ['exon' in y for y in tbl[WALTZ_INTERVAL_NAME_COLUMN]]
relevant_coverage_columns = [WALTZ_PEAK_COVERAGE_COLUMN, WALTZ_INTERVAL_NAME_COLUMN, SAMPLE_ID_COLUMN]
final_tbl = tbl[unfiltered_boolv & exon_boolv][relevant_coverage_columns]
# Add on new gene and probe columns
gene_probe = [get_gene_and_probe(val) for val in final_tbl[WALTZ_INTERVAL_NAME_COLUMN]]
gene_probe_df = pd.DataFrame(gene_probe, columns=['Gene', 'Probe'])
# Todo: most likely, the reset_index() calls are unnecessary
final_tbl = final_tbl.reset_index(drop=True)
final_tbl = pd.concat([final_tbl, gene_probe_df], axis=1)
final_tbl = final_tbl.reset_index(drop=True)
return final_tbl
def get_coverage_per_interval_exon_level(tbl):
"""
Exon-Level Coverage per Interval Graph comes from Duplex Bam, Pool A Targets
"""
total_boolv = (tbl['method'] == DUPLEX_COLLAPSING_METHOD)
final_tbl = tbl[total_boolv]
return final_tbl
########
# Main #
########
def main():
"""
This method is kept separate to allow for testing of the create_combined_qc_tables() method,
using a mock argparse object
:return:
"""
parser = argparse.ArgumentParser(description='MSK ACCESS QC module', formatter_class=argparse.RawTextHelpFormatter)
# Probe-level QC files, A-Targets
parser.add_argument('-swa', '--standard_waltz_pool_a', type=str, default=None, required=True, action=FullPaths)
parser.add_argument('-mua', '--unfiltered_waltz_pool_a', type=str, default=None, action=FullPaths)
parser.add_argument('-msa', '--simplex_waltz_pool_a', type=str, default=None, action=FullPaths)
parser.add_argument('-mda', '--duplex_waltz_pool_a', type=str, default=None, action=FullPaths)
# Probe-level QC files, B-Targets
parser.add_argument('-swb', '--standard_waltz_pool_b', type=str, default=None, required=True, action=FullPaths)
parser.add_argument('-mub', '--unfiltered_waltz_pool_b', type=str, default=None, action=FullPaths)
parser.add_argument('-msb', '--simplex_waltz_pool_b', type=str, default=None, action=FullPaths)
parser.add_argument('-mdb', '--duplex_waltz_pool_b', type=str, default=None, action=FullPaths)
# Exon-level QC files, A-Targets
parser.add_argument('-swael', '--standard_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-muael', '--unfiltered_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-msael', '--simplex_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
parser.add_argument('-mdael', '--duplex_waltz_metrics_pool_a_exon_level', type=str, default=None, action=FullPaths)
args = parser.parse_args()
create_combined_qc_tables(args)
def copy_fragment_sizes_files(args):
"""
Copy the fragment-sizes.txt files from the Waltz output folders, and create a combined table for all bam types
Fragment Sizes graph comes from Unfiltered Bam, Pool A Targets
Todo: not clean
:param args:
:return:
"""
fragment_sizes_files = [
(args.standard_waltz_pool_a, 'Standard_A'),
(args.unfiltered_waltz_pool_a, 'Unfiltered_A'),
(args.simplex_waltz_pool_a, 'Simplex_A'),
(args.duplex_waltz_pool_a, 'Duplex_A'),
(args.standard_waltz_pool_b, 'Standard_B'),
(args.unfiltered_waltz_pool_b, 'Unfiltered_B'),
(args.simplex_waltz_pool_b, 'Simplex_B'),
(args.duplex_waltz_pool_b, 'Duplex_B'),
]
fragment_sizes_files = [(outname, x[0], x[1]) for outname, x in zip(INSERT_SIZE_OUTPUT_FILE_NAMES, fragment_sizes_files)]
for dst, src, type in fragment_sizes_files:
# Copy to current directory of all aggregated QC info
frag_sizes_path = os.path.join(src, 'fragment-sizes.txt')
# Create combined DataFrame for A and B targets
fragment_sizes_df = pd.read_csv(frag_sizes_path, sep='\t')
fragment_sizes_df = fragment_sizes_df[['FragmentSize', 'TotalFrequency', SAMPLE_ID_COLUMN]]
fragment_sizes_df = fragment_sizes_df.pivot('FragmentSize', SAMPLE_ID_COLUMN, 'TotalFrequency')
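        # Wide format: rows are fragment sizes, one column of total frequencies per sample.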
# Add in missing rows for insert sizes that weren't represented
new_index = pd.Index(np.arange(1, 800), name='FragmentSize')
fragment_sizes_df = fragment_sizes_df.reindex(new_index).reset_index()
# Replace nan's with 0
fragment_sizes_df = fragment_sizes_df.fillna(0)
to_csv(fragment_sizes_df,os.path.join('.', dst))
def reformat_exon_targets_coverage_file(coverage_per_interval_table):
"""
DMP-specific format for coverage_per_interval_table file
# Todo:
# 1. Need to use average_coverage, not peak_coverage
:param coverage_per_interval_table:
:return:
"""
for method in coverage_per_interval_table[METHOD_COLUMN].unique():
subset = coverage_per_interval_table[coverage_per_interval_table['method'] == method]
subset = subset.pivot('interval_name', SAMPLE_ID_COLUMN, 'peak_coverage')
subset = subset.reset_index().rename(columns={subset.index.name: 'interval_name'})
interval_names_split = subset['interval_name'].str.split(':', expand=True)
# Turn interval_name into Interval and TargetName
subset.insert(0, 'TargetName', interval_names_split.iloc[:,0] + '_' + interval_names_split.iloc[:,2])
subset.insert(0, 'Interval', interval_names_split.iloc[:,3] + ':' + interval_names_split.iloc[:,4])
subset = subset.drop('interval_name', axis=1)
to_csv(subset, 'coverage_per_interval_A_targets_{}.txt'.format(method.replace(' ', '_')))
def create_combined_qc_tables(args):
"""
Read in and concatenate all the tables from their respective waltz output folders
Write these tables to the current directory
:param args: argparse.ArgumentParser with parsed arguments
:return:
"""
read_counts_total_pool_a_table = get_read_counts_total_table(args.standard_waltz_pool_a, POOL_A_LABEL)
read_counts_total_pool_b_table = get_read_counts_total_table(args.standard_waltz_pool_b, POOL_B_LABEL)
read_counts_total_table = pd.concat([read_counts_total_pool_a_table, read_counts_total_pool_b_table])
# Standard, Pools A and B
pool_a_read_counts = get_read_counts_table(args.standard_waltz_pool_a, POOL_A_LABEL)
pool_a_coverage_table = get_coverage_table(args.standard_waltz_pool_a, POOL_A_LABEL)
gc_cov_int_table = get_gc_table(TOTAL_LABEL, WALTZ_INTERVALS_FILENAME_SUFFIX, args.standard_waltz_pool_a)
pool_b_read_counts = get_read_counts_table(args.standard_waltz_pool_b, POOL_B_LABEL)
read_counts_table = pd.concat([pool_b_read_counts, pool_a_read_counts])
pool_b_coverage_table = get_coverage_table(args.standard_waltz_pool_b, POOL_B_LABEL)
coverage_table = pd.concat([pool_b_coverage_table, pool_a_coverage_table])
# Pool-Level, A Targets
unfilt = get_collapsed_waltz_tables(args.unfiltered_waltz_pool_a, UNFILTERED_COLLAPSING_METHOD, POOL_A_LABEL)
simplex = get_collapsed_waltz_tables(args.simplex_waltz_pool_a, SIMPLEX_COLLAPSING_METHOD, POOL_A_LABEL)
duplex = get_collapsed_waltz_tables(args.duplex_waltz_pool_a, DUPLEX_COLLAPSING_METHOD, POOL_A_LABEL)
read_counts_table = pd.concat([read_counts_table, unfilt[0], simplex[0], duplex[0]]).reset_index(drop=True)
coverage_table = pd.concat([coverage_table, unfilt[1], simplex[1], duplex[1]]).reset_index(drop=True)
gc_cov_int_table = | pd.concat([gc_cov_int_table, unfilt[2], simplex[2], duplex[2]]) | pandas.concat |
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
result = df.groupby("a").sample(n=2)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MaxAbsScaler, PolynomialFeatures
from constants import POSTPROCESSED_DATAPATH
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = | pd.read_csv(POSTPROCESSED_DATAPATH, sep=",", header="infer") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 16:00:06 2020
@author: <NAME>, FINTECH CONSULTANCY
license: Apache 2.0,
Note: only tested on windows 10
"""
import pandas as pd
import sys
import re
import os
import win32com.client
from docx import *
# Hardcoded for now. Wondering where configurations should go for future iterations
columns = ['Definition', 'Revised Definition', 'DeliveryID','Name', 'Category']
def checkExtension(filename:str, extension:str):
string = ''
for character in filename[-1::-1]:
if character == '.':
break
string += character
if ''.join(string[-1::-1]) != extension:
return(False)
return(True)
def generate_doc(filepath = 'Favorites.xlsx', output_name = 'Example.docx', ID = '1'):
print(filepath)
if type(filepath) != str:
filepath = str(filepath)
if type(output_name) != str:
output_name = str(output_name)
if type(ID) != str:
ID = str(ID)
if not checkExtension(filepath.split('/')[-1], 'xlsx'):
print("1. Error -- Type of file accepted should be an excel file")
return(False)
else:
print("1. Input file is of type xlsx.")
if not checkExtension(output_name.split('/')[-1], 'docx'):
print("2. Error -- Type of outputted should be a docx file")
return(False)
else:
print("2. Output file is of type docx.")
print("Opening the data")
print("Warning: sheet name is 'Sheet1'")
data = pd.read_excel(filepath, 'Sheet1')
count, _ = data.shape
print("Data has succesfully been opened")
''' Check will not work within python. Must be run from shell'''
# if not os.path.exists('Deliverytemplate.docx'):
# print(f"3. Error -- Make sure Deliverytemplate.docx is in the same directory as the {os.path.basename(__file__)}")
# else:
# print(f"3. Deliverytemplate.docx is in the same directory as the {os.path.basename(__file__)}")
print("Writing Revised contnet")
document = Document('Deliverytemplate.docx')
document.add_heading('Manifest', level=1)
p = document.add_paragraph('')
p.add_run('Delivery Type: test \n').bold = True
p.add_run(f'Number of fields: {count}').bold = True
document.save("Revised.docx")
print("Writing Original Content")
original = Document('Deliverytemplate.docx')
original .add_heading('Manifest', level=1)
p = original.add_paragraph('')
p.add_run('Delivery Type: test\n').bold = True
p.add_run(f'Number of fields: {count}').bold = True
original.save("Original.docx")
p = document.add_paragraph('')
po = original.add_paragraph('')
for i in range(count):
if pd.isna(data['Category'].iloc[i]):
pass
else:
p.add_run("Category:").italic = True
p.add_run(f" {data['Category'].iloc[i].strip()}\n")
po.add_run("Category:").italic = True
po.add_run(f" {data['Category'].iloc[i].strip()}\n")
if pd.isna(data['Name'].iloc[i]):
pass
else:
p.add_run('Name:').italic = True
p.add_run(f" {data['Name'].iloc[i].strip()}\n")
po.add_run("Name:").italic = True
po.add_run(f" {data['Name'].iloc[i].strip()}\n")
if | pd.isna(data['Revised Definition'].iloc[i]) | pandas.isna |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods to load and analyse results from experiments."""
import json
import os
from typing import Any, Callable, Dict, Iterable, Mapping, Tuple
import gym
from imitation.util import networks
import numpy as np
import pandas as pd
from stable_baselines.common import vec_env
from evaluating_rewards import serialize
Config = Tuple[Any, ...]
Stats = Mapping[str, Any]
ConfigStatsMapping = Mapping[Config, Stats]
FilterFn = Callable[[Iterable[str]], bool]
PreprocessFn = Callable[[pd.Series], pd.Series]
def to_series(x) -> pd.Series:
s = pd.Series(x)
s.index.names = ("Source", "Target", "Seed")
return s
def average_loss(stats: Stats, n: int = 10) -> float:
"""Compute average loss of last n data points in training."""
loss = pd.DataFrame(stats["loss"])["singleton"]
return loss.iloc[-n:].mean()
def average_unwrapped_loss(stats: Stats) -> float:
"""Compute average "unwrapped" loss (original model vs target)."""
metrics = stats["metrics"]
unwrapped_loss = [v["singleton"]["unwrapped_loss"] for v in metrics]
# Does not change during training, so can take mean over entire array
return np.mean(unwrapped_loss)
def loss_pipeline(
stats: ConfigStatsMapping,
preprocess: Tuple[PreprocessFn] = (),
):
"""Extract losses from stats and visualize in a heatmap."""
loss = {cfg: average_loss(d) for cfg, d in stats.items()}
unwrapped_loss = {cfg: average_unwrapped_loss(d) for cfg, d in stats.items()}
for pre in (to_series,) + preprocess:
loss = pre(loss)
unwrapped_loss = pre(unwrapped_loss)
return {"loss": loss, "unwrapped_loss": unwrapped_loss}
def get_metric(stats: ConfigStatsMapping, key: str, idx: int = -1):
"""Extract affine parameters from training statistics, at epoch idx."""
return {k: v["metrics"][idx]["singleton"][key] for k, v in stats.items()}
def get_affine_from_models(env_name: str, paths: Iterable[str]):
"""Extract affine parameters from reward model."""
venv = vec_env.DummyVecEnv([lambda: gym.make(env_name)])
res = {}
with networks.make_session():
for path in paths:
model = serialize.load_reward(
"evaluating_rewards/RewardModel-v0",
os.path.join(path, "model"),
venv,
)
            res[path] = model.models["wrapped"][0].get_weights()
    return res
def affine_pipeline(
stats: ConfigStatsMapping,
preprocess: Tuple[PreprocessFn] = (),
):
"""Extract final affine parameters from stats and visualize in a heatmap."""
constants = get_metric(stats, "constant")
scales = get_metric(stats, "scale")
for pre in (to_series,) + preprocess:
constants = pre(constants)
scales = pre(scales)
return {"constants": constants, "scales": scales}
def pipeline(stats: ConfigStatsMapping, **kwargs):
"""Run loss and affine pipeline on stats."""
return {"loss": loss_pipeline(stats, **kwargs), "affine": affine_pipeline(stats, **kwargs)}
# TODO(adam): backwards compatibility -- remove once rerun experiments
DATA_ROOT_PREFIXES = [
# Older versions of the code stored absolute paths in config.
# Try and turn these into relative paths for portability.
"/root/output",
"/home/adam/output",
"/mnt/eval_reward/data",
"/mnt/eval_reward_efs/data",
]
def canonicalize_data_root(path: str) -> str:
if path.endswith("dummy"):
path = "dummy"
for root_prefix in DATA_ROOT_PREFIXES:
if path.startswith(root_prefix):
path = path.replace(root_prefix, serialize.get_output_dir())
break
return path
def _canonicalize_cfg_path(cfg: Dict[str, Any]) -> Dict[str, Any]:
cfg = dict(cfg)
for fld in ("source_reward_path", "target_reward_path"):
if fld in cfg:
cfg[fld] = canonicalize_data_root(cfg[fld])
return cfg
def _find_sacred_parent(
path: str, seen: Dict[str, str]
) -> Tuple[Dict[str, Any], Dict[str, Any], str]:
"""Finds first Sacred directory that is in path or a parent.
Args:
path: Path to a directory to start searching from.
seen: A dictionary from parent paths to children.
Returns:
A tuple of the config found and the parent path it is located at.
As a side-effect, adds path to seen.
Raises:
ValueError: if the parent path was already in seen for a different child.
ValueError: no parent path containing a Sacred directory exists.
"""
parent = path
while parent and not os.path.exists(os.path.join(parent, "sacred", "config.json")):
parent = os.path.dirname(parent)
if parent == "/":
parent = ""
if not parent:
raise ValueError(f"No parent of '{path}' contains a Sacred directory.")
if parent in seen and seen[parent] != path:
raise ValueError(
f"index contains two paths '{path}' and '{seen[parent]}' "
f"with common Sacred parent 'f{parent}'."
)
seen[parent] = path
config_path = os.path.join(parent, "sacred", "config.json")
with open(config_path, "r") as f:
config = json.load(f)
run_path = os.path.join(parent, "sacred", "run.json")
with open(run_path, "r") as f:
run = json.load(f)
return config, run, parent
HARDCODED_TYPES = ["evaluating_rewards/Zero-v0"]
def path_to_config(kinds: Iterable[str], paths: Iterable[str]) -> pd.DataFrame:
"""Extracts relevant config parameters from paths in index.
Args:
kinds: An index of reward types.
paths: An index of paths.
Returns:
A MultiIndex consisting of original reward type and seed(s).
"""
seen = {}
res = []
for (kind, path) in zip(kinds, paths):
if kind in HARDCODED_TYPES or path == "dummy":
res.append((kind, "hardcoded", 0, 0))
else:
path = canonicalize_data_root(path)
config, run, path = _find_sacred_parent(path, seen)
if "target_reward_type" in config:
# Learning directly from a reward: e.g. train_{regress,preferences}
pretty_type = {"train_regress": "regress", "train_preferences": "preferences"}
model_type = pretty_type[run["command"]]
res.append((config["target_reward_type"], model_type, config["seed"], 0))
elif "rollout_path" in config:
# Learning from demos: e.g. train_adversarial
config["rollout_path"] = canonicalize_data_root(config["rollout_path"])
rollout_config, _, _ = _find_sacred_parent(config["rollout_path"], seen)
reward_type = rollout_config["reward_type"] or "EnvReward"
reward_args = config["init_trainer_kwargs"]["reward_kwargs"]
state_only = reward_args.get("state_only", False)
model_type = "IRL" + ("-SO" if state_only else "-SA")
res.append((reward_type, model_type, config["seed"], rollout_config["seed"]))
else:
raise ValueError(
f"Unexpected config at '{path}': does not contain "
"'source_reward_type' or 'rollout_path'"
)
names = ["source_reward_type", "model_type", "model_seed", "data_seed"]
return | pd.DataFrame(res, columns=names) | pandas.DataFrame |
# Related third party imports
import pandas as pd
import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
class LinearRegressionRecSys(object):
"""
Class used to train logistic regression model in each defined cluster and to provide recommendations.
"""
def __init__(self, portfolio, database, cluster_labels, random_state=None):
"""
Set class variables and check if the database contains the portfolios' IDs.
The database must contain IDs as index and the portfolio must contain `id` as a feature.
:param portfolio: Pandas DataFrame, contains only the portfolio clients' IDs as feature `id`.
:param database: Pandas DataFrame, contains all the companies' IDs as index.
:param cluster_labels: Pandas DataFrame, contains numbers from 0 to max number of clusters. Maps each company to a cluster.
        :param random_state: integer, default=None, set random state of internal processes.
"""
# Set internal variables: portfolio, database, cluster_labels
self.portfolio = portfolio
self.database = database
self.cluster_labels = cluster_labels
self.random_state = random_state
# Test - check if database contains portfolios' IDs
print(f"\nTesting Portfolio . . .\n")
print(f"Database size: {self.database.shape[0]}")
print(f"Portfolio size: {self.portfolio.shape[0]}")
assert np.all(self.portfolio["id"].isin(self.database.index)), "Not all the portfolios' ids are in the database"
print("Portfolios' ids are in the database\n")
# Set internal variable: rating_df Pandas DataFrame, with features:
# - id: the id of the company
# - client: if the company is a client (present) on the porfolio
# - cluster: to which cluster the company belongs
rating_df = self.database.reset_index()["id"] # get all IDs
portfolio_flag = rating_df.isin(self.portfolio["id"]) # True means it is a client
portfolio_flag.name = "client"
rating_df = pd.concat([rating_df, portfolio_flag, self.cluster_labels], axis=1) # concatenate IDs, client flag and cluster labels
self.rating_df = rating_df
def _get_cluster_target_df(self, rating_df, cluster):
"""
Returns a Pandas DataFrame with all companies present in the cluster and a pandas series that represents if the company is a client.
:param cluster: integer, cluster from which predictors dataframe will be constructed.
:create self._cluster_df: Pandas DataFrame with features and IDs of all companies present in the cluster.
:create self._target: Pandas Series that represents if the company is a client.
"""
condition = rating_df["cluster"] == cluster # means that we're accessing the right cluster
cluster_ids = rating_df[(condition)]["id"] # gets ids from all companies in the cluster
cluster_df = self.database.loc[cluster_ids, :] # get features from all companies in the cluster
target = rating_df.loc[condition, "client"] # get target for cluster - True means it is a client
self._cluster_df = cluster_df
self._target = target
def train_classifiers(self):
"""
Train logistic regression classifier for each cluster present in the companies dataframe. \
        Predictors form a dataframe with all companies' features for each cluster; the target is a Pandas Series of boolean values indicating whether the company is a client.
Does train test split, SMOTE oversampling, logistic regression training for each cluster.
:create self.train_output: dictionary, contains keys:
-"client_flag": 1 if cluster has no clients, 0 if has.
The following keys are present in the second case:
-"classifier": trained logistic regression object.
-"metrics": dictionary, contains keys:
-"accuracy": accuracy score
-"precision": precision score
-"recall": recall score
-"f1_score": f1 score
-"roc_auc": area under the curve
"""
n_clusters = self.cluster_labels.nunique()[0]
train_output = {}
for cluster in range(n_clusters):
print(f"- Veryfing Cluster {cluster} -\n")
self._get_cluster_target_df(self.rating_df, cluster)
print(f"Cluster size: {self._cluster_df.shape[0]}")
print(f"Clients in cluster: {self._target.sum()}")
print(f"Clients per cluster ratio: {round(100*(self._target.sum() / self._cluster_df.shape[0]), 3)} % \n")
print("Processing:\n")
if self._target.sum() != 0:
client_flag = 0
print("Applying train test split . . .")
X_train, X_test, y_train, y_test = train_test_split(self._cluster_df,
self._target,
test_size=0.3,
stratify=self._target,
random_state=self.random_state)
print("Applying SMOTE oversampling . . .")
X_train, y_train = SMOTE(n_jobs=-1, random_state=self.random_state).fit_resample(X_train, y_train)
print("Training Logistic Regression . . .")
classifier = LogisticRegression(solver="saga",
max_iter=1000,
n_jobs=-1,
class_weight="balanced",
random_state=self.random_state)
classifier.fit(X_train, y_train)
print("Making predictions and saving metrics . . .")
prediction = classifier.predict(X_test)
train_output.update({cluster: {"client_flag": client_flag,
"classifier": classifier,
"metrics": {"accuracy": accuracy_score(y_test, prediction),
"precision": precision_score(y_test, prediction),
"recall": recall_score(y_test, prediction),
"f1_score": f1_score(y_test, prediction),
"roc_auc": roc_auc_score(y_test, prediction)}
}
})
else:
print("Cluster has no clients, saving {'client_flag': 1} in the output dictionary.")
client_flag = 1
train_output.update({cluster: {"client_flag": client_flag}})
print(169*"-"+"\n")
self.train_output = train_output # dict output of the training function
def recommend(self, n_recommendations=10, remove_portfolio_ids=True):
"""
Makes "n_recommendations". Models need to be trained first with method "train_classifiers".
Use method "train_recommend" to do both steps at once.
Recommendations are made for each cluster proportional to the number of clients in them. Recommendations are sorted by their predicted probabilities in descending order.
:param n_recommendations: integer, default=10, number of recommendations to be made.
        :param remove_portfolio_ids: boolean, default=True, when False IDs from client companies are maintained in the dataset from which recommendations are made. \
When True, IDs from client companies are removed.
:return recommendations: Pandas DataFrame, contains IDs and predicted probability of recommended clients for portfolio, sorted in descending order by predicted probabilities.
"""
n_clients = self.rating_df["client"].sum() # total number of clients
recs_ratio = (self.rating_df.groupby("cluster").sum() / n_clients) # ratio of recommendations per cluster
recs_per_cluster = round(recs_ratio * n_recommendations, 0) # number of recommendations per cluster
n_clusters = self.cluster_labels.nunique()[0] # number of clusters
try:
self.train_output
except:
raise Exception("Models haven't been trained. Models need to be trained before making recommendations. Use method 'train_classifiers' or 'train_recommend'")
recommendations = | pd.DataFrame() | pandas.DataFrame |
import logging
from pathlib import Path
import pandas as pd
from ..gov import Gov, Matcher
from ..const import FILENAME_GOV_TEST_SET
GOV_URL = "http://wiki-de.genealogy.net/Verlustlisten_Erster_Weltkrieg/Projekt/Ortsnamen"
logger = logging.getLogger(__name__)
class GovTestData:
def __init__(self, gov: Gov, url: str = GOV_URL):
self.gov_url = url
self.gov = gov
self.filename = FILENAME_GOV_TEST_SET
self.filepath = Path(gov.data_root)
# load data
self.data = self.load_gov_test_data()
def load_gov_test_data(self) -> pd.DataFrame:
if not (self.filepath / self.filename).exists():
correction_tables = []
correction_tables.extend(pd.read_html(self.gov_url, attrs={"class": "sortable"}))
correction_tables.extend(pd.read_html(self.gov_url, attrs={"class": "wikitable"}))
df = | pd.concat(correction_tables) | pandas.concat |
"""The noisemodels module contains all noisemodels available in Pastas.
Supported Noise Models
----------------------
.. autosummary::
:nosignatures:
:toctree: ./generated
NoiseModel
NoiseModel2
Examples
--------
By default, a noise model is added to Pastas. It is possible to replace the
default model with different models as follows:
>>> n = ps.NoiseModel()
>>> ml.add_noisemodel(n)
or shorter
>>> ml.add_noisemodel(ps.NoiseModel())
See Also
--------
pastas.model.Model.add_noisemodel
"""
import numpy as np
from pandas import Timedelta, DataFrame, Series
from .decorators import set_parameter, njit
__all__ = ["NoiseModel", "NoiseModel2", "ArmaModel"]
class NoiseModelBase:
_name = "NoiseModelBase"
def __init__(self):
self.nparam = 1
self.name = "noise"
self.parameters = DataFrame(
columns=["initial", "pmin", "pmax", "vary", "name"])
def set_init_parameters(self, oseries=None):
if oseries is not None:
pinit = oseries.index.to_series().diff() / Timedelta(1, "D")
pinit = pinit.median()
else:
pinit = 14.0
self.parameters.loc["noise_alpha"] = (pinit, 1e-5, 5000, True, "noise")
@set_parameter
def set_initial(self, name, value):
"""
Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, "initial"] = value
@set_parameter
def set_pmin(self, name, value):
"""
Internal method to set the minimum value of the noisemodel.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, "pmin"] = value
@set_parameter
def set_pmax(self, name, value):
"""
Internal method to set the maximum parameter values.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, "pmax"] = value
@set_parameter
def set_vary(self, name, value):
"""
Internal method to set if the parameter is varied.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, "vary"] = value
def to_dict(self):
return {"type": self._name}
@staticmethod
def weights(res, parameters):
return 1
class NoiseModel(NoiseModelBase):
"""
Noise model with exponential decay of the residual and weighting.
Notes
-----
Calculates the noise [1]_ according to:
.. math::
        v(t_1) = r(t_1) - r(t_0) e^{-\\Delta t/\\alpha}
Note that in the referenced paper, alpha is defined as the inverse of
alpha used in Pastas. The unit of the alpha parameter is always in days.
Examples
--------
It can happen that the noisemodel is used during model calibration
to explain most of the variation in the data. A recommended solution is to
scale the initial parameter with the model timestep, E.g.::
>>> n = NoiseModel()
>>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))
References
----------
.. [1] <NAME>, <NAME>., and <NAME> (2005), Modeling
irregularly spaced residual series as a continuous stochastic
process, Water Resour. Res., 41, W12404, doi:10.1029/2004WR003726.
"""
_name = "NoiseModel"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 1
self.set_init_parameters()
def simulate(self, res, parameters):
"""
Simulate noise from the residuals.
Parameters
----------
res: pandas.Series
The residual series.
parameters: array-like
Alpha parameters used by the noisemodel.
Returns
-------
noise: pandas.Series
Series of the noise.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
# res.values is needed else it gets messed up with the dates
res.iloc[1:] -= np.exp(-odelt / alpha) * res.values[:-1]
res.name = "Noise"
return res
@staticmethod
def weights(res, parameters):
"""
Method to calculate the weights for the noise.
Based on the sum of weighted squared noise (SWSI) method.
Parameters
----------
res: pandas.Series
The residual series.
parameters: array-like
Alpha parameters used by the noisemodel.
Returns
-------
w: numpy.ndarray
Array with the weights.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
# divide power by 2 as nu / sigma is returned
power = 1.0 / (2.0 * odelt.size)
exp = np.exp(-2.0 / alpha * odelt) # Twice as fast as 2*odelt/alpha
w = np.exp(power * np.sum(np.log(1.0 - exp))) / np.sqrt(1.0 - exp)
w = np.insert(w, 0, 0) # Set first weight to zero
w = Series(w, res.index)
return w
class NoiseModel2(NoiseModelBase):
"""
Noise model with exponential decay of the residual.
Notes
-----
Calculates the noise according to:
.. math::
        v(t_1) = r(t_1) - r(t_0) e^{-\\Delta t/\\alpha}
The unit of the alpha parameter is always in days.
Examples
--------
It can happen that the noisemodel is used during model calibration
to explain most of the variation in the data. A recommended solution is to
scale the initial parameter with the model timestep, E.g.::
>>> n = NoiseModel()
>>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))
"""
_name = "NoiseModel2"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 1
self.set_init_parameters()
@staticmethod
def simulate(res, parameters):
"""
Simulate noise from the residuals.
Parameters
----------
res : pandas.Series
The residual series.
parameters : array_like
Alpha parameters used by the noisemodel.
Returns
-------
noise: pandas.Series
Series of the noise.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
# res.values is needed else it gets messed up with the dates
res.iloc[1:] -= np.exp(-odelt / alpha) * res.values[:-1]
res.iloc[0] = 0
res.name = "Noise"
return res
class NoiseModel3(NoiseModelBase):
"""
Noise model with exponential decay of the residual and weighting.
Differences compared to NoiseModel:
1. First value is residual
2. First weight is 1 / sig_residuals (i.e., delt = infty)
3. Sum of all weights is always 1
Notes
-----
Calculates the noise [1]_ according to:
.. math::
        v(t_1) = r(t_1) - r(t_0) e^{-\\Delta t/\\alpha}
The unit of the alpha parameter is always in days.
"""
_name = "NoiseModel3"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 1
self.set_init_parameters()
def simulate(self, res, parameters):
"""
Simulate noise from the residuals.
Parameters
----------
res: pandas.Series
The residual series.
parameters: array-like
Alpha parameters used by the noisemodel.
Returns
-------
noise: pandas.Series
Series of the noise.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
# res.values is needed else it gets messed up with the dates
v = res.values[1:] - np.exp(-odelt / alpha) * res.values[:-1]
w = np.ones(len(res))
w[1:] = 1 / np.sqrt((1 - np.exp(-2.0 / alpha * odelt)))
        w /= np.sum(w)  # make sure the weights sum to 1
# res.iloc[0] is already the residual
res.iloc[1:] = v
res *= w
res.name = "Noise"
return res
@staticmethod
def weights(res, parameters):
"""
Method to calculate the weights for the noise.
Based on the sum of weighted squared noise (SWSI) method.
Parameters
----------
alpha: float
odelt: numpy.ndarray
Returns
-------
w: pandas.Series
Series of the weights.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
# divide power by 2 as nu / sigma is returned
power = 1.0 / (2.0 * odelt.size)
exp = np.exp(-2.0 / alpha * odelt) # Twice as fast as 2*odelt/alpha
w = np.exp(power * np.sum(np.log(1.0 - exp))) / np.sqrt(1.0 - exp)
w = np.insert(w, 0, 1) # Set first weight to one
w = Series(w, res.index)
return w
class ArmaModel(NoiseModelBase):
"""
ARMA(1,1) Noise model to simulate the noise.
Notes
-----
Calculates the noise according to:
.. math::
\\upsilon_t = r_t - r_{t-1} e^{-\\Delta t/\\alpha} - \\upsilon_{t-1}
e^{-\\Delta t/\\beta}
The unit of the alpha parameter is always in days.
Warnings
--------
This model has only been tested on regular time steps and should not be
used for irregular time steps yet.
"""
_name = "ArmaModel"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 2
self.set_init_parameters()
def set_init_parameters(self, oseries=None):
self.parameters.loc["noise_alpha"] = (10, 1e-9, np.inf, True, "noise")
self.parameters.loc["noise_beta"] = (10, 1e-9, np.inf, True, "noise")
def simulate(self, res, parameters):
alpha = parameters[0]
beta = parameters[1]
# Calculate the time steps
odelt = (res.index[1:] - res.index[:-1]).values / Timedelta("1d")
a = self.calculate_noise(res.values, odelt, alpha, beta)
return | Series(index=res.index, data=a, name="Noise") | pandas.Series |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or string-like. Python allows any hashable to be a key,
but for now we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears in multiple places and not all extensions have the same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
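# --- Editorial sketch (not part of the original module): a minimal recursive
# search matching the docstring example, i.e. collect every value stored under a
# given key anywhere in a nested dict/list structure. The real `Q` helper is
# presumably richer; this is only an illustration, checked against `ex1` above.
def _find_key_values(key, obj):
    found = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                found.append(v)
            found.extend(_find_key_values(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            found.extend(_find_key_values(key, item))
    return found
# _find_key_values('url', ex1) -> ['url1', 'url2', 'url3']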
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
| u('fxVersion') | pandas.compat.u |
import pytest
from pandas import Categorical, DataFrame, Series
import pandas.util.testing as tm
def _assert_series_equal_both(a, b, **kwargs):
"""
Check that two Series are equal.
This check is performed commutatively.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
tm.assert_series_equal(a, b, **kwargs)
tm.assert_series_equal(b, a, **kwargs)
def _assert_not_series_equal(a, b, **kwargs):
"""
Check that two Series are not equal.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
try:
tm.assert_series_equal(a, b, **kwargs)
msg = "The two Series were equal when they shouldn't have been"
pytest.fail(msg=msg)
except AssertionError:
pass
def _assert_not_series_equal_both(a, b, **kwargs):
"""
Check that two Series are not equal.
This check is performed commutatively.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
_assert_not_series_equal(a, b, **kwargs)
_assert_not_series_equal(b, a, **kwargs)
@pytest.mark.parametrize("data", [range(3), list("abc"), list("áàä")])
def test_series_equal(data):
_assert_series_equal_both(Series(data), Series(data))
@pytest.mark.parametrize(
"data1,data2",
[
(range(3), range(1, 4)),
(list("abc"), list("xyz")),
(list("áàä"), list("éèë")),
(list("áàä"), list(b"aaa")),
(range(3), range(4)),
],
)
def test_series_not_equal_value_mismatch(data1, data2):
_assert_not_series_equal_both(Series(data1), Series(data2))
@pytest.mark.parametrize(
"kwargs",
[
dict(dtype="float64"), # dtype mismatch
dict(index=[1, 2, 4]), # index mismatch
dict(name="foo"), # name mismatch
],
)
def test_series_not_equal_metadata_mismatch(kwargs):
data = range(3)
s1 = Series(data)
s2 = Series(data, **kwargs)
_assert_not_series_equal_both(s1, s2)
@pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("check_less_precise", [False, True, 0, 1, 2, 3, 10])
def test_less_precise(data1, data2, dtype, check_less_precise):
s1 = Series([data1], dtype=dtype)
s2 = Series([data2], dtype=dtype)
kwargs = dict(check_less_precise=check_less_precise)
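# Editorial note: the assertion is expected to fail only when the requested
# precision can resolve the difference -- full precision (False) or 10 decimals
# always does, while 3 decimals (True or >= 3) only catches the pair that
# differs by 1e-4.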
if (check_less_precise is False or check_less_precise == 10) or (
(check_less_precise is True or check_less_precise >= 3)
and abs(data1 - data2) >= 0.0001
):
msg = "Series values are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, **kwargs)
else:
_assert_series_equal_both(s1, s2, **kwargs)
@pytest.mark.parametrize(
"s1,s2,msg",
[
# Index
(
Series(["l1", "l2"], index=[1, 2]),
Series(["l1", "l2"], index=[1.0, 2.0]),
"Series\\.index are different",
),
# MultiIndex
(
DataFrame.from_records(
{"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
).c,
DataFrame.from_records(
{"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
).c,
"MultiIndex level \\[0\\] are different",
),
],
)
def test_series_equal_index_dtype(s1, s2, msg, check_index_type):
kwargs = dict(check_index_type=check_index_type)
if check_index_type:
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, **kwargs)
else:
tm.assert_series_equal(s1, s2, **kwargs)
def test_series_equal_length_mismatch(check_less_precise):
msg = """Series are different
Series length are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
s1 = Series([1, 2, 3])
s2 = Series([1, 2, 3, 4])
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
def test_series_equal_values_mismatch(check_less_precise):
msg = """Series are different
Series values are different \\(33\\.33333 %\\)
\\[left\\]: \\[1, 2, 3\\]
\\[right\\]: \\[1, 2, 4\\]"""
s1 = | Series([1, 2, 3]) | pandas.Series |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Overview:
Classify cell candidates and determine true cells
Usage:
HDoG_classifier.py PARAM_FILE
Options:
-h --help Show this screen.
--version Show version.
"""
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import scipy.ndimage
import os.path, joblib
import tifffile
import json
from docopt import docopt
from sklearn.mixture import BayesianGaussianMixture
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.tree import export_graphviz
from sklearn.pipeline import Pipeline
import graphviz
from MergeBrain import WholeBrainCells
dt_classified = np.dtype([
('is_positive', 'bool'),
])
class ThresholdClassifier:
"""
Simple classifier that thresholds the first feature:
samples whose first feature is larger than the specified threshold are regarded as positive.
"""
def __init__(self, threshold):
self.threshold = threshold
def predict(self, X):
return X[:,0] > self.threshold
class LinearClassifier2D:
"""
A sample satisfying `a * X[:,0] + b * X[:,1] + c > 0` is regarded as positive.
"""
def __init__(self, a,b,c):
self.a = a
self.b = b
self.c = c
def predict(self, X):
return self.a * X[:,0] + self.b * X[:,1] + self.c > 0
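# --- Editorial usage note (values are illustrative, not from the original script):
# both simple classifiers act on the feature matrix built by get_X_3d below, e.g.
#   X = get_X_3d(data_local)  # columns: intensity, structureness, blobness
#   pred = ThresholdClassifier(threshold=1.5).predict(X)        # threshold chosen for illustration
#   pred = LinearClassifier2D(a=1.0, b=1.0, c=-3.0).predict(X)  # coefficients chosen for illustration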
def get_X_whole(wbc, normalizer_img=None, **kwargs):
info = joblib.load(os.path.join(wbc.wholebrain_images.params["dst_basedir"], "info.pkl"))
X_whole = []
for src_pkl in info.keys():
yname,xname = src_pkl.split("/")[-1].split(".")[0].split("_")
if "FW" in src_pkl:
cellstack = wbc.halfbrain_cells_FW.get_stack_by_xyname(xname=xname, yname=yname)
else:
cellstack = wbc.halfbrain_cells_RV.get_stack_by_xyname(xname=xname, yname=yname)
data_scalemerged_stack = joblib.load(src_pkl)
X = get_X_with_normalizer(cellstack.data_local, data_scalemerged_stack, normalizer_img, is_ave=wbc.is_ave, **kwargs)
X_whole.append(X)
X_whole = np.concatenate(X_whole)
return X_whole
def get_data_scalemerged_whole(wbc, clf=None, normalizer_img=None, return_X=False, **kwargs):
info = joblib.load(os.path.join(wbc.wholebrain_images.params["dst_basedir"], "info.pkl"))
list_data_scalemerged = []
if return_X:
X_whole = []
for src_pkl in info.keys():
yname,xname = src_pkl.split("/")[-1].split(".")[0].split("_")
if "FW" in src_pkl:
cellstack = wbc.halfbrain_cells_FW.get_stack_by_xyname(xname=xname, yname=yname)
else:
cellstack = wbc.halfbrain_cells_RV.get_stack_by_xyname(xname=xname, yname=yname)
data_scalemerged_stack = joblib.load(src_pkl)
if clf:
X = get_X_with_normalizer(cellstack.data_local, data_scalemerged_stack, normalizer_img, is_ave=wbc.is_ave, **kwargs)
if X.shape[0] == 0:
continue
pred = clf.predict(X)
list_data_scalemerged.append(data_scalemerged_stack[data_scalemerged_stack["is_valid"]][pred])
if return_X:
X_whole.append(X)
else:
list_data_scalemerged.append(data_scalemerged_stack[data_scalemerged_stack["is_valid"]])
if return_X:
X = get_X_with_normalizer(cellstack.data_local, data_scalemerged_stack, normalizer_img, is_ave=wbc.is_ave, **kwargs)
X_whole.append(X)
data_scalemerged_whole = np.concatenate(list_data_scalemerged)
if not return_X:
return data_scalemerged_whole
else:
X_whole = np.concatenate(X_whole)
return data_scalemerged_whole, X_whole
def make_density_image(wbc, clf=None, dtype=np.uint16, normalizer_img=None, **kwargs):
data_scalemerged_whole = get_data_scalemerged_whole(wbc, clf, normalizer_img, **kwargs)
depth = int(np.floor(np.max(data_scalemerged_whole["scaled_z"])))
height = int(np.floor(np.max(data_scalemerged_whole["scaled_y"])))
width = int(np.floor(np.max(data_scalemerged_whole["scaled_x"])))
density_img,_ = np.histogramdd(
np.vstack([
data_scalemerged_whole["scaled_z"],
data_scalemerged_whole["scaled_y"],
data_scalemerged_whole["scaled_x"]
]).T,
bins=(depth, height, width),
range=[(0,depth-1),(0,height-1),(0,width-1)]
)
return density_img.astype(dtype)
# Feature Vector
def get_X_with_normalizer(data_local, data_scalemerged, normalizer_img=None, **kwargs):
data_local_valid = data_local[data_scalemerged["is_valid"]]
if data_local_valid.shape[0] == 0:
return np.empty((0,3), dtype=np.float32)
X = get_X_3d(data_local_valid, **kwargs)
if normalizer_img is not None:
data_scalemerged_valid = data_scalemerged[data_scalemerged["is_valid"]]
X[:, 0] -= np.log10(normalizer_img[
np.clip(np.floor(data_scalemerged_valid["scaled_z"]).astype(int),
a_min=0,a_max=normalizer_img.shape[0]-1),
np.clip(np.floor(data_scalemerged_valid["scaled_y"]).astype(int),
a_min=0,a_max=normalizer_img.shape[1]-1),
np.clip(np.floor(data_scalemerged_valid["scaled_x"]).astype(int),
a_min=0,a_max=normalizer_img.shape[2]-1)
])
return X
def get_X_3d(data,
bias=np.array([0, 0, 0]),
scale=np.array([1.0, 1.0, 1.]),
bias_before_log=np.array([0.0,0.0,0.0]),
is_ave=False):
if not is_ave:
_X = np.array([
data["intensity"],
data["structureness"],
data["blobness"]
]).T
else:
_X = np.array([
data["intensity"] / data["size"],
data["structureness"],
data["blobness"]
]).T
X = np.nan_to_num((np.log10(_X + bias_before_log) + bias) * scale)
return X
def predict_unsupervised(X, i_feature_maximize=1, n_components=3, **vargs):
vbgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=n_components,
**vargs
)
vbgmm.fit(X)
pred = vbgmm.predict(X)
if i_feature_maximize is None:
return pred
else:
i_pred_cluster = np.argmax(vbgmm.means_[:,i_feature_maximize])
return pred == i_pred_cluster
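# --- Editorial usage note: a typical unsupervised call on features from get_X_3d,
# maximizing the structureness column (index 1); the keyword values are illustrative.
#   is_positive = predict_unsupervised(X, i_feature_maximize=1, n_components=3,
#                                      max_iter=500, random_state=0)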
def train_decision_tree(X, y, max_depth=2):
pipe = Pipeline([
('pca', PCA()),
('tree', DecisionTreeClassifier(max_depth=max_depth))
])
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size=.2, shuffle=True)
pipe.fit(X_train, y_train)
print("Validation Score:{}".format(pipe.score(X_test, y_test)))
pred = pipe.predict(X)
return pipe, pred
def show_decision_tree(pipe, feature_names=["intensity","structureness","blobness"]):
if isinstance(pipe, Pipeline):
clf = pipe.named_steps['tree']
elif isinstance(pipe, DecisionTreeClassifier):
clf = pipe
dot_data = export_graphviz(clf, out_file=None,
feature_names=feature_names,
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
return graph
def plot_classification_result(feature_x, feature_y, pred):
sns.scatterplot(feature_x, feature_y, hue=pred)
if np.count_nonzero(pred == 0) > 1:
sns.kdeplot(feature_x[pred==0],feature_y[pred==0], cmap="Blues", shade=True, shade_lowest=False)
if np.count_nonzero(pred == 1) > 1:
sns.kdeplot(feature_x[pred==1],feature_y[pred==1], cmap="Oranges", shade=True, shade_lowest=False)
return
def plot_features_for_each_stacks(r, func_get_X, clf=None):
i_xs = []
i_ys = []
x1s = []
x2s = []
preds=[]
for i_y in range(len(r.stack_ys)):
print(i_y)
for i_x in range(len(r.stack_xs)):
_stack = r.get_stack(i_xy=(i_x,i_y), verbose=False)
if len(_stack.df) < 10000: continue
_X_stack = func_get_X(_stack.df)
if clf: _pred_stack = clf.predict(_X_stack)
for i in range(0,len(_stack.df), 1000):
i_xs.append(i_x)
i_ys.append(i_y)
x1s.append(_X_stack[i,0])
x2s.append(_X_stack[i,1])
if clf: preds.append(_pred_stack[i])
df_plot = pd.DataFrame({
"i_x":pd.Series(i_xs),
"i_y":pd.Series(i_ys),
"X1":pd.Series(x1s),
"X2":pd.Series(x2s),
"predicted": | pd.Series(preds) | pandas.Series |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
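# Editorial note, inferred from the fixtures above: `preprocessType` controls how
# missing time/category combinations are handled when expanding -- 'pad' fills the
# gaps with np.nan, 'remove' drops the incomplete entries, and 'ignore' leaves the
# data untouched (and is only accepted when nothing is expanded).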
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
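# Editorial note: the backend-specific suites below (pandas, numpy, arrow, parquet,
# feather) exercise the same expandTime/expandCategory/preprocessType matrix as
# Test_base_io, only routed through the corresponding from_*/to_* functions.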
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
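# Editorial helper note: record_batch_to_pandas concatenates a list of pyarrow
# RecordBatches back into a single pandas DataFrame so that chunked output can be
# compared against the expected frames.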
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pq.write_table(table,'test_collection.parquet')
testData = from_parquet('test_collection.parquet','time','category')
assert tsc == testData
os.remove('test_collection.parquet')
###########
def test_to_parquet_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,False,True,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,False,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,True,'ignore')
def test_to_parquet_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
to_parquet(['test.parquet','label.parquet'],tsd,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet(['test.parquet','label.parquet'],tsc,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
to_parquet('test.parquet',tsd,False,False,'ignore',False)
test = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(test,df,False)
os.remove('test.parquet')
def test_to_parquet_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet('test.parquet',tsc,False,False,'ignore')
test = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
to_parquet('test.parquet',tsc,True,True,'ignore')
test = pq.read_table('test.parquet').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
os.remove('test.parquet')
class Test_Generator_IO:
def test_from_generator(self):
pass
def test_to_generator(self):
pass
class Test_Feather_IO:
def test_from_feather_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test.feather')
testData = from_feather('test.feather','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.feather')
def test_from_feather_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test_collection.feather')
testData = from_feather('test_collection.feather','time','category')
assert tsc == testData
os.remove('test_collection.feather')
###########
def test_to_feather_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = | pd.DataFrame(data) | pandas.DataFrame |
import os
from datetime import datetime, timedelta, timezone
import pandas as pd
from pandas.core.frame import DataFrame
from sklearn.linear_model import LinearRegression
def demand(exp_id, directory, threshold, warmup_sec):
raw_runs = []
# Compute SLI, i.e., lag trend, for each tested configuration
filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and "lag-trend" in filename and filename.endswith(".csv")]
for filename in filenames:
run_params = filename[:-4].split("_")
dim_value = run_params[1]
instances = run_params[2]
df = pd.read_csv(os.path.join(directory, filename))
input = df
input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
X = regress.iloc[:, 1].values.reshape(-1, 1) # .values converts the column into a numpy array
Y = regress.iloc[:, 2].values.reshape(-1, 1) # -1 lets numpy infer the number of rows; the array has 1 column
linear_regressor = LinearRegression() # create object for the class
linear_regressor.fit(X, Y) # perform linear regression
Y_pred = linear_regressor.predict(X) # make predictions
trend_slope = linear_regressor.coef_[0][0]
row = {'load': int(dim_value), 'resources': int(instances), 'trend_slope': trend_slope}
raw_runs.append(row)
runs = | pd.DataFrame(raw_runs) | pandas.DataFrame |
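# --- Editorial sketch (the original function is truncated at this point): the
# `threshold` argument suggests the usual next step is to keep runs whose lag
# trend slope stays below it and report, per load, the smallest sufficient
# resource count. Column names follow the rows built above; the aggregation
# itself is an assumption, not part of the original code.
#   runs["sufficient"] = runs["trend_slope"] < threshold
#   demand = runs[runs["sufficient"]].groupby("load")["resources"].min().reset_index()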
#from scipy.stats import chi2
import argparse
import sys
import numpy as np
import pandas as pd
import itertools
#ARGS = None
pnames = ["PRIOR-0", "PRIOR-1", "LIK-0", "LIK-1", "LIK", "POST-0", "POST-1"]
def sigmoid(x, derivative=False):
return x*(1-x) if derivative else 1/(1+np.exp(-x))
def main(args):
param_file = args.param_file
input_file = args.input_file
output_file = args.output_file
# Read pgenmi input and make GENE rownames and remove column
din = pd.read_csv(input_file, sep="\t")
din.index = din["GENE"]
din = din.iloc[:,1:]
dout = din
genes = din.index
# Read pgenmi output and extract w,a and make name (H_ALL,H_KD,etc..) rownames
    dpfull = pd.read_csv(param_file, sep="\t")
from __future__ import absolute_import, print_function, division
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import rc
import pandas as pd
import logging
import json
import numpy as np
import datetime
from better.tools.indicator import max_drawdown, sharpe, positive_count, negative_count, moving_accumulate
from better.tools.configprocess import parse_time, check_input_same
from better.tools.shortcut import execute_backtest
# dictionary mapping the name of each indicator to the function that computes it
# input is portfolio changes
INDICATORS = {"portfolio value": np.prod,
"sharpe ratio": sharpe,
"max drawdown": max_drawdown,
"positive periods": positive_count,
"negative periods": negative_count,
"postive day": lambda pcs: positive_count(moving_accumulate(pcs, 48)),
"negative day": lambda pcs: negative_count(moving_accumulate(pcs, 48)),
"postive week": lambda pcs: positive_count(moving_accumulate(pcs, 336)),
"negative week": lambda pcs: negative_count(moving_accumulate(pcs, 336)),
"average": np.mean}
NAMES = {"best": "Best Stock (Benchmark)",
"crp": "UCRP (Benchmark)",
"ubah": "UBAH (Benchmark)",
"anticor": "ANTICOR",
"olmar": "OLMAR",
"pamr": "PAMR",
"cwmr": "CWMR",
"rmr": "RMR",
"ons": "ONS",
"up": "UP",
"eg": "EG",
"bk": "BK",
"corn": "CORN",
"m0": "M0",
"wmamr": "WMAMR"
}
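
# Illustrative sketch (not part of the original module): every entry in INDICATORS
# above takes the array of per-period portfolio changes p_t / p_{t-1}. Only the
# plain numpy entries are exercised here; `pcs` is a made-up example series.
def _demo_indicators():
    import numpy as np
    pcs = np.array([1.01, 0.99, 1.02, 1.00, 0.98, 1.03])
    return {"portfolio value": INDICATORS["portfolio value"](pcs),  # np.prod -> cumulative growth
            "average": INDICATORS["average"](pcs)}                  # np.mean -> mean per-period change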
def plot_backtest(config, algos, labels=None):
"""
@:param config: config dictionary
@:param algos: list of strings representing the name of algorithms or index of better result
"""
results = []
for i, algo in enumerate(algos):
if algo.isdigit():
results.append(np.cumprod(_load_from_summary(algo, config)))
logging.info("load index "+algo+" from csv file")
else:
logging.info("start executing "+algo)
results.append(np.cumprod(execute_backtest(algo, config)))
logging.info("finish executing "+algo)
start, end = _extract_test(config)
timestamps = np.linspace(start, end, len(results[0]))
dates = [datetime.datetime.fromtimestamp(int(ts)-int(ts)%config["input"]["global_period"])
for ts in timestamps]
weeks = mdates.WeekdayLocator()
days = mdates.DayLocator()
rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"],
"size": 8})
"""
styles = [("-", None), ("--", None), ("", "+"), (":", None),
("", "o"), ("", "v"), ("", "*")]
"""
fig, ax = plt.subplots()
fig.set_size_inches(9, 5)
for i, pvs in enumerate(results):
if len(labels) > i:
label = labels[i]
else:
label = NAMES[algos[i]]
ax.semilogy(dates, pvs, linewidth=1, label=label)
#ax.plot(dates, pvs, linewidth=1, label=label)
plt.ylabel("portfolio value $p_t/p_0$", fontsize=12)
plt.xlabel("time", fontsize=12)
xfmt = mdates.DateFormatter("%m-%d %H:%M")
ax.xaxis.set_major_locator(weeks)
ax.xaxis.set_minor_locator(days)
datemin = dates[0]
datemax = dates[-1]
ax.set_xlim(datemin, datemax)
ax.xaxis.set_major_formatter(xfmt)
plt.grid(True)
plt.tight_layout()
ax.legend(loc="upper left", prop={"size":10})
fig.autofmt_xdate()
plt.savefig("result.eps", bbox_inches='tight',
pad_inches=0)
plt.show()
def table_backtest(config, algos, labels=None, format="raw",
indicators=list(INDICATORS.keys())):
"""
@:param config: config dictionary
@:param algos: list of strings representing the name of algorithms
or index of better result
@:param format: "raw", "html", "latex" or "csv". If it is "csv",
    the result will be saved in a csv file. Otherwise only print it out
@:return: a string of html or latex code
"""
results = []
labels = list(labels)
for i, algo in enumerate(algos):
if algo.isdigit():
portfolio_changes = _load_from_summary(algo, config)
logging.info("load index " + algo + " from csv file")
else:
logging.info("start executing " + algo)
portfolio_changes = execute_backtest(algo, config)
logging.info("finish executing " + algo)
indicator_result = {}
for indicator in indicators:
indicator_result[indicator] = INDICATORS[indicator](portfolio_changes)
results.append(indicator_result)
if len(labels)<=i:
labels.append(NAMES[algo])
    dataframe = pd.DataFrame(results, index=labels)
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, learning_curve,ShuffleSplit, train_test_split
import os
import time
import shap
import xgboost as xgb
areas = ['CE']
data_version = '2021-07-14_3'
#targets = ['g1','g2','q','r','D','mu_w_0','mu_a_0','RoCof','nadir','MeanDevInFirstHalf','Loglike']
targets = ['MeanDevInFirstHalf']#['vari','nadir','nad','g1','g2','D','q','r']#,'RoCoFLong','mu_w_0','mu_a_0','RoCof','MeanDevInFirstHalf','Loglike']
start_time = time.time()
for area in areas:
print('---------------------------- ', area, ' ------------------------------------')
#data_folder = './prepared_data/{}/version-{}/'.format(area,data_version)
data_folder = './prepared_data/{}/{}/'.format(area,data_version)
for target in targets:
print('-------- ', target, ' --------')
res_folder = './Results/model_fit/{}/version-{}/target_{}/'.format(area,data_version, target)
if not os.path.exists(res_folder):
os.makedirs(res_folder)
y_train = pd.read_hdf(data_folder+'y_train.h5').loc[:, target]
y_test = pd.read_hdf(data_folder+'y_test.h5').loc[:, target]
if os.path.exists(res_folder+'y_pred.h5'):
y_pred = pd.read_hdf(res_folder+'y_pred.h5')
            y_pred_cont = pd.read_hdf(res_folder+'y_pred_cont.h5')
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 11:41:36 2021
@author: Koustav
"""
import os
import glob
import matplotlib.pyplot as plt
import seaborn as sea
import numpy as np
import pandas as pan
import math
import matplotlib.ticker as mtick
from scipy.optimize import curve_fit
def expo(x, a, b, c):
return a + b*(np.exp(c*x))
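
# Illustrative sketch (not part of the original analysis): fitting the same
# a + b*exp(c*x) model with scipy's curve_fit on synthetic decay data, as is done
# for the cross-correlation series further below. The data values are invented.
def _demo_expo_fit():
    x = np.linspace(0, 200, 50)
    y = 0.1 + 0.9 * np.exp(-0.02 * x) + np.random.normal(0, 0.01, x.size)
    popt, pcov = curve_fit(expo, x, y, p0=np.asarray([0, 1, -0.05]))
    perr = np.sqrt(np.diag(pcov))   # one-sigma uncertainties of (a, b, c)
    return popt, perr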
def yandu():
binder=[]
#Saves the values of the critical exponent
for i in range(0,40):
base_path = r"15+16" + "\\" + str(i)
''' log = open(base_path + r"\log.txt", "r")
log_list = log.readlines()
g = int(log_list[0]); p = float(log_list[1]); c = int(log_list[2]) '''
g =128; c= 12500; p = 0
#print("Grid Size: %d\t p: %f\t Length: %d\t" %(g,p,c))
unspliced_data = []
#This will store the raw unspliced data without any processing from all the CSV files.
r=0
files = glob.glob(base_path + "/*.csv")
for file in files:
if (os.path.getsize(file) > 512):
r+=16
#Filtering for file sizes that are greater than 512 Bytes in size.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, usecols = (0,1,2,3) )
'''
data_temp resembles:
| t, Correl, k+l, p |
'''
p = data_temp[0,3]
if(len(unspliced_data) == 0):
#First chip off the old block.
unspliced_data = data_temp
else:
unspliced_data = np.concatenate((unspliced_data, data_temp), axis=0)
                #Concatenated.
print("Grid Size: %d\t p: %f\t Length: %d\t" %(g,p,c))
a,b = unspliced_data.shape
print("Unspliced Array Size:\t (%d, %d)" %(a,b))
#ko=input("Enter to continue:\t")
yandex(unspliced_data, g, p, c, r, binder)
heado = 'p, A, SD(A), B, SD(B), C, SD(C), lag_0.95, lag_0.9, lag_0.8, lag_0.75, lag_0.7, lag_0.6'
#Header for the CSV file
np.savetxt("PissingAbout15+16.csv", binder, delimiter=',', header=heado, comments='#')
find_stat(binder) # Find final stats
def yandex(unspliced_data, g, p, c, r, bind):
#Plot the shit out of Compton.
os.chdir("../../../figures")
if(os.path.isdir("CrossCor")==False):
os.mkdir("CrossCor")
os.chdir("CrossCor")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("GandU")==False):
os.mkdir("GandU")
os.chdir("GandU")
f=0; hurtlocker=0;
if(g == 128):
unspliced_data[:,0]/= 4
unspliced_data = exxon_split(unspliced_data, c)
unspliced_data[:,0]*= 4
print('First few lines of final matrix:')
L=[]
for x in range(25000,25500):
if(x%r == 0):
print("\n")
if(len(L) > 0):
print("SD of Max Corr for t = %f is:\t %f\n" %( unspliced_data[x-1,0], np.std(L) ) )
L=[]
print("%f \t %6.5f \t %f \t %f" %(tuple(unspliced_data[x,:])))
L.append(unspliced_data[x,1])
hurtlocker= pan.DataFrame(unspliced_data, columns= [r"# Updates (m)", "Max Cross-Correlation", "i", "j"])
x1 =np.transpose(unspliced_data[:,0])
x2= np.transpose(unspliced_data[:,1])
f= sea.lineplot(data=hurtlocker, x=r"# Updates (m)" , y="Max Cross-Correlation", estimator= 'mean', ci='sd', err_style="band")
plt.axvline(x= g*g, color='0.65')
plt.text(g*(g+1),0.8, r'$N^2$ updates',rotation=90, color ='0.7')
plt.axvline(x= 2*g*g, color='0.65')
plt.text(2*g*(g+0.5),0.75,r'$2 \times N^2$ updates',rotation=90, color ='0.7')
plt.axvline(x= 3*g*g, color='0.65')
plt.text(3*g*(g+0.33),0.75,r'$3 \times N^2$ updates',rotation=90, color ='0.7')
popt, pcov = curve_fit(expo, x1, x2, p0= np.asarray([0, 1, -0.05]))
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[2]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], popt[2], perr[2])
plt.plot(x1, expo(x1, *popt), 'm--', label=r'Fit: $ R_{0,0}[p,t] = %3.2f + %3.2f \times e^{(%7.6f \mp %5.5f) \times m} $ ' % tukan )
plt.legend()
#plt.xlim(g1,g2+20)
#plt.yscale('log', basey= math.e)
#plt.xscale('log', basex= math.e)
#g.xaxis.set_major_formatter(mtick.FuncFormatter(ticks))
#g.yaxis.set_major_formatter(mtick.FuncFormatter(ticks))
f.set_title(r'p = %f, Grid Size (G) = %d, n = %d' %(p,g,r))
plt.savefig("Cross Correlation --- p_%f - Grid Size (G)_%d - n_%d.png" %(p, g, r), dpi=400)
#plt.show()
plt.close()
#Time to get the lag timesteps (normalised by N^2).
lag=[]
iaggo= [0.95, 0.9, 0.8, 0.75, 0.7, 0.6]
for i in iaggo:
lag.append(float((math.log((i - popt[0])/popt[1])/(popt[2]*g*g))))
#Stores the lag.
bind.append([p, popt[0], perr[0], popt[1], perr[1], popt[2], perr[2], lag[0], lag[1], lag[2], lag[3], lag[4], lag[5]])
os.chdir(r"..\..\..\..\analysis\Mass Action\DP")
def find_stat(bind):
os.chdir("../../../figures")
if(os.path.isdir("CrossCor")==False):
os.mkdir("CrossCor")
os.chdir("CrossCor")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("GandU")==False):
os.mkdir("GandU")
os.chdir("GandU")
bind =np.array(bind)
blind = bind[:,0:7]
g= 128; r=16
    hurtlocker= pan.DataFrame(blind, columns= ["p", "A", "SD(A)", "B", "SD(B)", "Decay Rate", "SD(C)"])
# The file will produce result based on closing rank
# -*- coding: utf-8 -*-
"""//@<NAME>
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/priyanshgupta1998/Machine_learning/blob/master/Krisko_Assignmnet/coding.ipynb
"""
"""#Krisko Assignment"""
import pandas as pd # import pandas library (inbuilt python package) used for data manipulation (like reading, writing files etc.)
data1 = pd.read_excel('Input-Rank-Cutoff-List.xlsx') # read the dataset file( Excel)
data1.to_csv('Train.csv') # convert this .xlsx file into .csv format for better understanding.
#print(data1.shape) # check the order of the table ( rows x columns)
column = [] # create an empty list
# create the header for the dataset
for i in range(data1.shape[1]):
column.append('col_'+ str(i))
data = pd.read_csv('Train.csv' , names= column)
#print(data.shape)
#data.head() # display first 5 rows of dataset/table
#arrange all the rows in proper sequence
data.reset_index(inplace=True)
#data.head()
data.drop('index' , axis =1 , inplace =True)
"""#Get the Branch Name"""
app =[]
for i in list(data[data['col_0']=='Branch Name '].index):
app.append(i-1)
#print(len(app) , app)
collg = []
for i in app:
collg.append(data['col_0'][i])
#print(len(collg) , collg)
#17 ['IIT Bombay', 'IIT Delhi', 'IIT Kharagpur', 'IIT Kanpur', 'IIT Madras', 'IIT Roorkee', 'IIT Guwahati', 'IIT Bhubaneshwar', 'IIT Mandi', 'IIT indore', 'IIT Hydrabad', 'IIT Jodhpur', 'IIT Gandhinagar', 'IIT Patna', 'IIT Ropar', 'IIT (BHU) Varanasi', 'ISM Dhanbad']
# create another column with the name of 'COLLEGE_NAME'.
virus = app.copy()
virus += [296]
for i in range(len(virus)-1):
virus[i] = virus[i+1] - virus[i]
del virus[-1]
#print(len(virus) , virus)
# add all the college names
col_name = []
for i in range(len(collg)):
col_name+=[collg[i]]*virus[i]
#print(len(col_name))
data.insert(0,'College_Name',col_name)
#print(data.shape)
#data.head()
# Now new table has been created
#change all the header/column names accordingly
data.rename(columns = {"col_0": "BRANCH_NAME" , "col_1":"GENERAL_OPEN" , "col_2":"GENERAL_CLOSE" , "col_3":"OBC_OPEN" ,"col_4":"OBC_CLOSE" , "col_5":"SC_OPEN"
,"col_6":"SC_CLOSE", "col_7": "ST_OPEN" , "col_8":"ST_CLOSE"} , inplace=True)
#Drop/delete all rows/lines which are having null values
data = data.dropna(how='any',axis=0)
data.drop(0 , axis =0 , inplace =True) # delete first row (redundant)
# anrange data/table in order
data.reset_index(inplace=True)
data.drop('index' , axis =1 , inplace =True)
#print(data.shape)
#data.head(8)
# copy all data into another table named 'df'
df = data.copy()
#here we have total 103 unique Branches
#df['BRANCH_NAME'].nunique()
#df.info() # it will provide all the information regarding variable data types, null values, number of columns
# convert all the rank into integer/number from string
df['GENERAL_OPEN'] = df['GENERAL_OPEN'].astype(int)
df['GENERAL_CLOSE'] = df['GENERAL_CLOSE'].astype(int)
df['OBC_OPEN'] = df['OBC_OPEN'].astype(int)
df['OBC_CLOSE'] = df['OBC_CLOSE'].astype(int)
df['SC_OPEN'] = df['SC_OPEN'].astype(int)
df['SC_CLOSE'] = df['SC_CLOSE'].astype(int)
df['ST_OPEN'] = df['ST_OPEN'].astype(int)
df['ST_CLOSE'] = df['ST_CLOSE'].astype(int)
#print(df.shape)
#df.info()
#df.describe() # description of the dataset: mean, median, minimum, maximum, count etc. for columns with numeric values
#df.head(8)
#generate all the ranks for all the colleges for each Department
#1.)for general
rank_gen =[]
branch_gen = []
college_gen = []
categ_gen = []
for i in range(len(df)):
app = []
clg = []
cat= []
branch = []
for j in range(list(df['GENERAL_OPEN'])[i] , list(df['GENERAL_CLOSE'])[i] + 1):
branch.append(list(df['BRANCH_NAME'])[i])
app.append(j)
clg.append(list(df['College_Name'])[i])
cat.append('GENERAL')
rank_gen+=app
branch_gen+=branch
college_gen+=clg
categ_gen+=cat
#2.)for obc
rank_obc =[]
branch_obc = []
college_obc = []
categ_obc = []
for i in range(len(df)):
app = []
clg = []
cat= []
branch = []
for j in range(list(df['OBC_OPEN'])[i] , list(df['OBC_CLOSE'])[i] + 1):
branch.append(list(df['BRANCH_NAME'])[i])
app.append(j)
clg.append(list(df['College_Name'])[i])
cat.append('OBC')
rank_obc+=app
branch_obc+=branch
college_obc+=clg
categ_obc+=cat
#3.)for sc
rank_sc =[]
branch_sc = []
college_sc = []
categ_sc = []
for i in range(len(df)):
app = []
clg = []
cat= []
branch = []
for j in range(list(df['SC_OPEN'])[i] , list(df['SC_CLOSE'])[i] + 1):
branch.append(list(df['BRANCH_NAME'])[i])
app.append(j)
clg.append(list(df['College_Name'])[i])
cat.append('SC')
rank_sc+=app
branch_sc+=branch
college_sc+=clg
categ_sc+=cat
#4.)for st
rank_st =[]
branch_st = []
college_st = []
categ_st = []
for i in range(len(df)):
app = []
clg = []
cat= []
branch = []
for j in range(list(df['ST_OPEN'])[i] , list(df['ST_CLOSE'])[i] + 1):
branch.append(list(df['BRANCH_NAME'])[i])
app.append(j)
clg.append(list(df['College_Name'])[i])
cat.append('ST')
rank_st+=app
branch_st+=branch
college_st+=clg
categ_st+=cat
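
# Illustrative sketch (not part of the original script): the four loops above all
# expand an (open, close) rank window into one row per rank, repeating the branch,
# institute and category labels alongside. A compact toy version of the same idea
# (the institute name and numbers below are invented):
def _demo_rank_expansion():
    toy = {"BRANCH_NAME": ["CSE", "EE"], "College_Name": ["IIT X", "IIT X"],
           "OPEN": [1, 4], "CLOSE": [3, 5]}
    rows = []
    for branch, college, lo, hi in zip(toy["BRANCH_NAME"], toy["College_Name"],
                                       toy["OPEN"], toy["CLOSE"]):
        for rank in range(lo, hi + 1):
            rows.append({"Department": branch, "INSTITUTES": college,
                         "CATEGORY": "GENERAL", "RANK_PREDICTOR": rank})
    return pd.DataFrame(rows)   # 5 rows: ranks 1-3 for CSE, 4-5 for EE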
#Design dataframe for each category individually
#1.)for general
data_gen = pd.DataFrame()
data_gen['Department'] = branch_gen
data_gen['INSTITUTES'] = college_gen
data_gen['CATEGORY'] = categ_gen
data_gen['RANK_PREDICTOR'] = rank_gen
#print(data_gen.shape)
#data_gen.head()
#2.)for obc
data_obc = pd.DataFrame()
data_obc['Department'] = branch_obc
data_obc['INSTITUTES'] = college_obc
data_obc['CATEGORY'] = categ_obc
data_obc['RANK_PREDICTOR'] = rank_obc
#print(data_obc.shape)
#data_obc.head()
#3.)for sc
data_sc = pd.DataFrame()
data_sc['Department'] = branch_sc
data_sc['INSTITUTES'] = college_sc
data_sc['CATEGORY'] = categ_sc
data_sc['RANK_PREDICTOR'] = rank_sc
#print(data_sc.shape)
#data_sc.head()
#4.) for st
data_st= pd.DataFrame()
import numpy as np
import pandas as pd
import pytest
from estimagic.benchmarking.process_benchmark_results import _clip_histories
from estimagic.benchmarking.process_benchmark_results import _find_first_converged
from estimagic.benchmarking.process_benchmark_results import (
_get_history_as_stacked_sr_from_results,
)
from estimagic.benchmarking.process_benchmark_results import (
_get_history_of_the_parameter_distance,
)
from estimagic.benchmarking.process_benchmark_results import _make_history_monotone
from estimagic.benchmarking.process_benchmark_results import _normalize
from estimagic.benchmarking.process_benchmark_results import (
create_convergence_histories,
)
PROBLEMS = ["prob1", "prob2", "prob3"]
@pytest.fixture
def problem_algo_eval_df():
df = pd.DataFrame()
df["problem"] = ["prob1"] * 8 + ["prob2"] * 6
df["algorithm"] = ["algo1"] * 4 + ["algo2"] * 4 + ["algo1"] * 3 + ["algo2"] * 3
df["n_evaluations"] = [0, 1, 2, 3] * 2 + [0, 1, 2] * 2
return df
def test_find_first_converged(problem_algo_eval_df):
# we can assume monotonicity, i.e. no switch back from True to False
converged = pd.Series(
[ # in the middle
False,
False,
True,
True,
]
+ [ # last entry
False,
False,
False,
True,
]
+ [ # first entry
True,
True,
True,
]
+ [ # not converged
False,
False,
False,
]
)
res = _find_first_converged(converged, problem_algo_eval_df)
expected = pd.Series(
[ # in the middle
False,
False,
True,
False,
]
+ [ # last entry
False,
False,
False,
True,
]
+ [ # first entry
True,
False,
False,
]
+ [ # not converged
False,
False,
False,
]
)
pd.testing.assert_series_equal(res, expected)
def test_normalize_minimize():
start_values = pd.Series([5, 4, 10], index=PROBLEMS)
target_values = pd.Series([1, 0, 0], index=PROBLEMS)
    df = pd.DataFrame()
from pyspark.sql.functions import expr, col, lit, year
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def violations_in_year(nyc_data, vyear):
return nyc_data.select('issue_date').filter(year('issue_date') == vyear).count()
def reduction_in_violations(nyc_data, enable_plot=True):
violations_2018 = violations_in_year(nyc_data, 2018)
violations_2019 = violations_in_year(nyc_data, 2019)
violations_2020 = violations_in_year(nyc_data, 2020)
years = [2018, 2019, 2020]
violations = [violations_2018, violations_2019, violations_2020]
if enable_plot:
fig, ax = plt.subplots(1, 1, figsize=(10,5))
ax.set_title("Year Vs No. of violations")
ax.set_xlabel("Year")
ax.set_ylabel("No. of violations")
ax.bar(years, violations, color='blue')
fig.savefig('../output/reduction_in_violations.png')
reduction_2019_2018 = ((violations_2018 - violations_2019)/violations_2018) * 100
    reduction_2020_2019 = ((violations_2019 - violations_2020)/violations_2019) * 100
    reduction_data = [('Reduction 2019 from 2018', reduction_2019_2018), ('Reduction 2020 from 2019', reduction_2020_2019)]
reduction_pad = pd.DataFrame(reduction_data, columns = ['Reduction Years', 'Reduction'])
return reduction_pad
def season_violation_frequencies(nyc_data, enable_plot=True):
season_bins = nyc_data.withColumn('season', expr("case when month(issue_date) in (12, 1, 2) then 'winter'\
when month(issue_date) in (3, 4, 5) then 'spring' \
when month(issue_date) in (6, 7, 8) then 'summer'\
when month(issue_date) in (9, 10, 11) then 'autumn'\
end"))
season_freq = season_bins.select('season').groupBy('season').agg({'season': 'count'}).withColumnRenamed('count(season)', 'No of tickets')
## plot bar graph for seasons
season_freq_data = season_freq.collect()
seasons = [row['season'] for row in season_freq_data]
frequencies = [row['No of tickets'] for row in season_freq_data]
if enable_plot:
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.set_title("Season Vs No. of violations")
ax.set_xlabel("Season")
ax.set_ylabel("No. of violations")
ax.bar(seasons, frequencies, color=['green', 'cyan','yellow'])
fig.savefig('../output/season_violation_frequencies.png')
return season_freq.toPandas()
def season_violations(season_bins, season):
violation_by_season = season_bins.select('season', 'violation_code')\
.filter(col('season') == season)\
.groupBy('violation_code')\
.agg({'violation_code':'count'})\
.withColumnRenamed('count(violation_code)', 'Freq of Violations')\
.withColumn('season', lit(season))\
.sort('Freq of Violations', ascending=False)\
.take(3)
return violation_by_season
def common_violations_season(nyc_data, enable_plot=True):
season_bins = nyc_data.withColumn('season', expr("case when month(issue_date) in (12, 1, 2) then 'winter'\
when month(issue_date) in (3, 4, 5) then 'spring' \
when month(issue_date) in (6, 7, 8) then 'summer'\
when month(issue_date) in (9, 10, 11) then 'autumn'\
end"))
spring_violations = season_violations(season_bins, 'spring')
winter_violations = season_violations(season_bins, 'winter')
summer_violations = season_violations(season_bins, 'summer')
autumn_violations = season_violations(season_bins, 'autumn')
spring_v = [row['Freq of Violations'] for row in spring_violations]
spring_l = [row['violation_code'] for row in spring_violations]
winter_v = [row['Freq of Violations'] for row in winter_violations]
wintert_l = [row['violation_code'] for row in winter_violations]
summer_v = [row['Freq of Violations'] for row in summer_violations]
summer_l = [row['violation_code'] for row in summer_violations]
autumn_v = [row['Freq of Violations'] for row in autumn_violations]
autumn_l = [row['violation_code'] for row in autumn_violations]
if enable_plot:
x_ticks = ['spring', 'winter', 'summer', 'autumn']
labels = ['Top1', 'Top2', 'Top3']
x = np.arange(len(labels))
width = 0.2
fig, ax = plt.subplots(1, 1, figsize=(10,5))
ax.bar(x-0.2, spring_v, width, color='cyan')
ax.bar(x, winter_v, width, color='orange')
ax.bar(x+0.2, summer_v, width, color='green')
ax.bar(x+0.4, autumn_v, width, color='yellow')
ax.set_xlabel("Seasons")
ax.set_ylabel("Violations")
ax.legend(["spring", "winter", "summer", "autumn"])
fig.savefig('../output/common_violations_season.png')
seasonwise_violations = spring_violations + winter_violations + summer_violations + autumn_violations
    pd_seasonwise_violations = pd.DataFrame(seasonwise_violations, columns = ['Violation Code', 'Frequency', 'Season'])
import os
import datetime
import random
import pandas as pd
import numpy as np
from calendar import monthrange
from dateutil.easter import easter
from utilities import get_path, get_config
DAYS = {"MON": 0, "Mon": 0, "Mo": 0, "Montag": 0, "Monday": 0,
"TUE": 1, "Tue": 1, "Di": 1, "Dienstag": 1, "Tuesday": 1,
"WED": 2, "Wed": 2, "Mi": 2, "Mittwoch": 2, "Wednesday": 2,
"THU": 3, "Thu": 3, "Do": 3, "Donnerstag": 3, "Thursday": 3,
"FRI": 4, "Fri": 4, "Fr": 4, "Freitag": 4, "Friday": 4,
"SAT": 5, "Sat": 5, "Sa": 5, "Samstag": 5, "Saturday": 5,
"SUN": 6, "Sun": 6, "So": 6, "Sonntag": 6, "Sunday": 6}
MONTHS = {"Januar": 1,
"Februar": 2,
"März": 3,
"April": 4,
"Mai": 5,
"Juni": 6,
"Juli": 7,
"August": 8,
"September": 9,
"Oktober": 10,
"November": 11,
"Dezember": 12}
MONTHS_REV = {v: k for k, v in MONTHS.items()}
CONFIG = get_config()
def get_all_dates(year, month):
_, max_date = monthrange(year, month)
start = f"{year}-{month:02}-01"
end = f"{year}-{month:02}-{max_date:02}"
out = pd.date_range(start, end)
return out
def get_holidays(year):
new_year = f"{year}-01-01"
womens_day = f"{year}-03-08"
easter_sun = easter(year)
easter_mon = easter_sun + datetime.timedelta(days=1)
easter_fri = easter_sun - datetime.timedelta(days=2)
may_first = f"{year}-05-01"
fathers_day = easter_sun + datetime.timedelta(days=40)
whitsun = easter_sun + datetime.timedelta(days=50)
unification_day = f"{year}-10-03"
christmas_1 = f"{year}-12-25"
christmas_2 = f"{year}-12-26"
new_years_eve = f"{year}-12-31"
out = pd.to_datetime([new_year, womens_day, easter_fri, easter_sun,
easter_mon, may_first, fathers_day, whitsun,
unification_day, christmas_1, christmas_2,
new_years_eve])
return out
def get_work_days(year, month):
all_days = get_all_dates(year, month)
holidays = get_holidays(year)
work_days = pd.to_datetime([d for d in all_days if d not in holidays])
return work_days
def get_penalty(x):
if x > pd.Timedelta('9 hours'):
return pd.Timedelta('45 minutes')
elif x > pd.Timedelta('6 hours'):
return pd.Timedelta('30 minutes')
else:
return pd.Timedelta('0 seconds')
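
# Illustrative usage (not part of the original module): the statutory break that
# get_penalty deducts for a given gross working duration. The durations are examples.
def _demo_penalty():
    examples = [pd.Timedelta('5 hours'), pd.Timedelta('7 hours'), pd.Timedelta('10 hours')]
    # -> no break up to 6h, 30 minutes above 6h, 45 minutes above 9h
    return {str(d): str(get_penalty(d)) for d in examples}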
def get_net_working_hours(df):
duration = pd.to_timedelta(df.activity_end) - pd.to_timedelta(df.activity_start)
penalties = duration.apply(get_penalty)
out = (duration - penalties).sum() / pd.Timedelta('1 hour')
return out
def generate_slot():
for h in np.arange(5, 0, -0.25):
yield pd.Timedelta(hours=h)
def strfdelta(tdelta, fmt="{hours:02}:{minutes:02}:{seconds:02}"):
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return fmt.format(**d)
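
# Illustrative usage (not part of the original module): strfdelta renders a Timedelta
# with an ordinary format string, e.g. for the activity_start/activity_end columns.
def _demo_strfdelta():
    return strfdelta(pd.Timedelta(hours=7, minutes=5))   # '07:05:00'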
def create_row(indexes, slot, note=None):
idx = indexes[np.random.randint(0, len(indexes))]
start_time = pd.Timedelta(hours=np.random.randint(6, 15))
end_time = start_time + slot
out = pd.DataFrame({"activity_start": strfdelta(start_time),
"activity_end": strfdelta(end_time),
"note": note},
index=[idx])
return out
class Schedule:
template = pd.read_csv(get_path("schedule.csv"), comment='#',
skipinitialspace=True,
parse_dates=["valid_from", "valid_until"],
converters={"day_of_week": lambda x: DAYS.get(x)})
activities = pd.read_csv(get_path("activities.txt"), comment='#',
header=None, names=["title"]).title.to_list()
    # Ensure that activities has at least two entries - otherwise 'choice' throws a tantrum
activities += [None, None]
def __init__(self):
self.total_hours = CONFIG["setup"]["total_hours"]
self.year = CONFIG["setup"]["year"]
self.start_sheet = CONFIG["general"]["start_sheet"]
self.fill_missing = CONFIG["fill"]["fill_missing"]
self.avoid_weekends = CONFIG["fill"]["avoid_weekends"]
self.max_overtime = CONFIG["fill"]["max_overtime"]
self.max_undertime = CONFIG["fill"]["max_undertime"]
self.dates = None
def fill(self, year, month):
idx = get_work_days(year, month)
        df = pd.DataFrame(index=idx)
import numpy as np
import networkx as nx
import pandas as pd
import random
import string
import scipy.stats
import network_prop
import sys
# for parallel processing
#from joblib import Parallel, delayed
#import multiprocessing
def main(num_reps=10, seed_gene_file='HC_genes/example_seed.tsv',int_file='../interactomes/G_PCnet.gpickle', out_name='ASD',rand_method = 'degree_binning',single_or_double='single'):
'''
Calculate z-scores for heat propagation
Inputs:
num_reps: number of randomizations
seed_gene_file: location of file containing seed genes (see example for format... clunky format due to historical reasons... need to improve)
int_file: location of interactome to use (gpickle format)
out_name: identifier for output files (currently saves in current directory... need to update to allow setting of save location)
rand_method: type of randomization (default = 'degree_binning', alternate method 'degree_ks_test' deprecated)
single_or_double: single network prop or double network prop. (default = 'single'. 'double' is deprecated)
python netprop_zscore.py 10 HC_genes/example_seed.tsv ../interactomes/G_PCnet.gpickle ASD degree_binning single
'''
print('number of randomizations = '+str(num_reps))
print('background interactome = ' + int_file)
print('randomization method = ' + rand_method)
print('single or double = ' + single_or_double)
num_reps = int(num_reps)
# load interactome and select focal interactome
Gint = nx.Graph()
Gint = nx.read_gpickle(int_file)
if 'None' in Gint.nodes():
Gint.remove_node('None')
# load HC genes
HC_genes_temp = pd.read_csv(seed_gene_file,sep='\t',index_col='Unnamed: 0')
seed_HC = [str(g[1:-1]).strip("'") for g in HC_genes_temp['seed_genes'].tolist()[0][1:-1].split(', ')]
print(seed_gene_file+':')
print(len(seed_HC))
seed_HC = list(np.intersect1d(Gint.nodes(),seed_HC))
print(len(seed_HC))
# calculate the z-score
# calc Wprime from Gint
Wprime = network_prop.normalized_adj_matrix(Gint,conserve_heat=True)
if single_or_double=='single': # calculate z-scores from a single set of seed genes
print('calculating z-scores: '+seed_gene_file)
z_seed,Fnew_rand_seed = calc_zscore_heat(Gint,Wprime,seed_HC,num_reps=num_reps,rand_method=rand_method)
z_seed.to_csv('z_'+out_name+'_'+str(num_reps)+'_reps_'+rand_method+'.tsv',sep='\t')
#pd.DataFrame(Fnew_rand_seed).to_csv('Fnew_'+outname+'_rand'+str(num_reps)+'_reps_'+rand_method+'.tsv',sep='\t')
elif single_or_double=='double': # calculate z-scores from two sets of seed genes:
# --- keeping for completeness, but currently not functional ----
print('calculating ASD-CHD z-scores')
z_ASD_CHD,Fnew_rand_ASD_CHD = calc_zscore_heat_double(Gint,Wprime,ASD_HC,CHD_HC,num_reps=num_reps,rand_method = rand_method)
z_ASD_CHD.to_csv('z_'+out_name+'_'+str(num_reps)+'_reps_'+rand_method+'.tsv',sep='\t')
def calc_zscore_heat(Gint,Wprime,genes_D1,num_reps=10,ks_sig = 0.3,rand_method = 'degree_binning'):
'''
Helper function to calculate the z-score of heat values from one input seet of genes
rand_method = 'degree_ks_test', or 'degree_binning'. select the type of randomization
'''
seed_D1 = list(np.intersect1d(list(genes_D1),Gint.nodes()))
Fnew_D1 = network_prop.network_propagation(Gint,Wprime,seed_D1,alpha=.5,num_its=20)
num_focal_edges=len(nx.subgraph(Gint,seed_D1).edges())
Fnew_rand_D1 = np.zeros([num_reps,len(Fnew_D1)])
if rand_method == 'degree_ks_test':
for r in range(num_reps):
if (r%50)==0:
print(r)
# UPDATE 8/23/17 -- replace with randomly selecting seed nodes, checking for degree distribution equivalence
p=0
# resample until degree distributions are not significantly different
while p<ks_sig:
                seed_D1_random = list(Gint.nodes())  # list() so the node order can be shuffled in place
np.random.shuffle(seed_D1_random)
seed_D1_random = seed_D1_random[0:len(seed_D1)]
ks_stat,p=scipy.stats.ks_2samp(pd.Series(Gint.degree(seed_D1)),pd.Series(Gint.degree(seed_D1_random)))
Fnew_rand_tmp = network_prop.network_propagation(Gint,Wprime,seed_D1_random,alpha=.5,num_its=20)
Fnew_rand_tmp.loc[seed_D1_random]=np.nan # set seeds to nan so they don't bias results
Fnew_rand_D1[r] = Fnew_rand_tmp.loc[Fnew_D1.index.tolist()]
elif rand_method == 'degree_binning':
bins = get_degree_binning(Gint,10)
min_degree, max_degree, genes_binned = zip(*bins)
        bin_df = pd.DataFrame({'min_degree':min_degree,'max_degree':max_degree,'genes_binned':genes_binned})
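
# Illustrative sketch (not from the original module): once the matrix of randomized
# propagation results (Fnew_rand_D1 above) is filled, the per-gene z-score is
# typically the observed heat minus the mean random heat, divided by the random
# standard deviation. This toy helper only shows that arithmetic; the argument
# names and shapes are assumptions, not the module's API.
def _demo_heat_zscore(observed_heat, random_heats):
    """observed_heat: 1-D array over genes; random_heats: 2-D array (reps x genes)."""
    import numpy as np
    mu = np.nanmean(random_heats, axis=0)
    sd = np.nanstd(random_heats, axis=0)
    return (observed_heat - mu) / sd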
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from darts.timeseries import TimeSeries
from darts.utils import timeseries_generation as tg
from darts.metrics import mape
from darts.models import (
NaiveSeasonal,
ExponentialSmoothing,
ARIMA,
Theta,
FourTheta,
FFT,
VARIMA
)
from ..utils.utils import SeasonalityMode, TrendMode, ModelMode
from ..logging import get_logger
from ..datasets import AirPassengersDataset, IceCreamHeaterDataset
logger = get_logger(__name__)
try:
from darts.models import RandomForest, LinearRegressionModel
TORCH_AVAILABLE = True
except ImportError:
logger.warning('Torch not installed - some local forecasting models tests will be skipped')
TORCH_AVAILABLE = False
# (forecasting models, maximum error) tuples
models = [
(ExponentialSmoothing(), 5.6),
(ARIMA(12, 2, 1), 10),
(ARIMA(1, 1, 1), 40),
(Theta(), 11.3),
(Theta(1), 20.2),
(Theta(-1), 9.8),
(FourTheta(1), 20.2),
(FourTheta(-1), 9.8),
(FourTheta(trend_mode=TrendMode.EXPONENTIAL), 5.5),
(FourTheta(model_mode=ModelMode.MULTIPLICATIVE), 11.4),
(FourTheta(season_mode=SeasonalityMode.ADDITIVE), 14.2),
(FFT(trend="poly"), 11.4),
(NaiveSeasonal(), 32.4)
]
if TORCH_AVAILABLE:
models += [(LinearRegressionModel(lags=12), 11.0),
(RandomForest(lags=12, n_estimators=200, max_depth=3), 15.5)]
# forecasting models with exogenous variables support
multivariate_models = [
(VARIMA(1, 0, 0), 55.6),
(VARIMA(1, 1, 1), 57.0),
]
dual_models = [ARIMA()]
try:
from ..models import Prophet
models.append((Prophet(), 13.5))
except ImportError:
logger.warning("Prophet not installed - will be skipping Prophet tests")
try:
from ..models import AutoARIMA
models.append((AutoARIMA(), 12.2))
dual_models.append(AutoARIMA())
PMDARIMA_AVAILABLE = True
except ImportError:
logger.warning("pmdarima not installed - will be skipping AutoARIMA tests")
PMDARIMA_AVAILABLE = False
try:
from ..models import TCNModel
TORCH_AVAILABLE = True
except ImportError:
logger.warning("Torch not installed - will be skipping Torch models tests")
TORCH_AVAILABLE = False
class LocalForecastingModelsTestCase(DartsBaseTestClass):
# forecasting horizon used in runnability tests
forecasting_horizon = 5
# dummy timeseries for runnability tests
np.random.seed(1)
ts_gaussian = tg.gaussian_timeseries(length=100, mean=50)
# real timeseries for functionality tests
ts_passengers = AirPassengersDataset().load()
ts_pass_train, ts_pass_val = ts_passengers.split_after(pd.Timestamp("19570101"))
# real multivariate timeseries for functionality tests
ts_ice_heater = IceCreamHeaterDataset().load()
ts_ice_heater_train, ts_ice_heater_val = ts_ice_heater.split_after(split_point=0.7)
def test_save_model_parameters(self):
# model creation parameters were saved before. check if re-created model has same params as original
for model, _ in models:
self.assertTrue(model._model_params, model.untrained_model()._model_params)
def test_models_runnability(self):
for model, _ in models:
model.fit(self.ts_gaussian)
prediction = model.predict(self.forecasting_horizon)
self.assertTrue(len(prediction) == self.forecasting_horizon)
def test_models_performance(self):
# for every model, check whether its errors do not exceed the given bounds
for model, max_mape in models:
np.random.seed(1) # some models are probabilist...
model.fit(self.ts_pass_train)
prediction = model.predict(len(self.ts_pass_val))
current_mape = mape(prediction, self.ts_pass_val)
self.assertTrue(current_mape < max_mape, "{} model exceeded the maximum MAPE of {}. "
"with a MAPE of {}".format(str(model), max_mape, current_mape))
def test_multivariate_models_performance(self):
# for every model, check whether its errors do not exceed the given bounds
for model, max_mape in multivariate_models:
np.random.seed(1)
model.fit(self.ts_ice_heater_train)
prediction = model.predict(len(self.ts_ice_heater_val))
current_mape = mape(prediction, self.ts_ice_heater_val)
self.assertTrue(current_mape < max_mape, "{} model exceeded the maximum MAPE of {}. "
"with a MAPE of {}".format(str(model), max_mape, current_mape))
def test_multivariate_input(self):
es_model = ExponentialSmoothing()
ts_passengers_enhanced = self.ts_passengers.add_datetime_attribute("month")
with self.assertRaises(AssertionError):
es_model.fit(ts_passengers_enhanced)
es_model.fit(ts_passengers_enhanced["#Passengers"])
with self.assertRaises(KeyError):
es_model.fit(ts_passengers_enhanced["2"])
def test_exogenous_variables_support(self):
for model in dual_models:
# Test models runnability
model.fit(self.ts_gaussian, future_covariates=self.ts_gaussian)
prediction = model.predict(
self.forecasting_horizon,
future_covariates=tg.gaussian_timeseries(
length=self.forecasting_horizon,
start=self.ts_gaussian.end_time() + self.ts_gaussian.freq))
self.assertTrue(len(prediction) == self.forecasting_horizon)
# Test mismatch in length between exogenous variables and forecasting horizon
with self.assertRaises(ValueError):
model.predict(
self.forecasting_horizon,
future_covariates=tg.gaussian_timeseries(length=self.forecasting_horizon - 1))
# Test mismatch in time-index/length between series and exogenous variables
with self.assertRaises(ValueError):
model.fit(self.ts_gaussian, future_covariates=self.ts_gaussian[:-1])
with self.assertRaises(ValueError):
model.fit(self.ts_gaussian[1:], future_covariates=self.ts_gaussian[:-1])
def test_dummy_series(self):
values = np.random.uniform(low=-10, high=10, size=100)
        ts = TimeSeries.from_dataframe(pd.DataFrame({"V1": values}))
import numpy as np
import pandas as pd
from cascade_at.core.log import get_loggers
from cascade_at.dismod.api.fill_extract_helpers import utils
from cascade_at.dismod.constants import DensityEnum, IntegrandEnum, \
INTEGRAND_TO_WEIGHT
LOG = get_loggers(__name__)
DEFAULT_DENSITY = ["uniform", 0, -np.inf, np.inf]
def prep_data_avgint(df: pd.DataFrame, node_df: pd.DataFrame, covariate_df: pd.DataFrame):
"""
Preps both the data table and the avgint table by
mapping locations to nodes and covariates to names.
Putting it in the same function because it does the same stuff,
but data and avgint need to be called separately because dismod requires
different columns.
Parameters
----------
df
The data frame to map
node_df
The node table from dismod db
covariate_df
The covariate table from dismod db
"""
data = df.copy()
data = utils.map_locations_to_nodes(df=data, node_df=node_df)
data = utils.map_covariate_names(df=data, covariate_df=covariate_df)
data.reset_index(inplace=True, drop=True)
return data
def construct_data_table(df: pd.DataFrame, node_df: pd.DataFrame,
covariate_df: pd.DataFrame, ages: np.ndarray, times: np.ndarray):
"""
Constructs the data table from input df.
Parameters
----------
df
data frame of inputs that have been prepped for dismod
node_df
the dismod node table
covariate_df
the dismod covariate table
    ages
        array of ages for the model
    times
        array of times for the model
"""
LOG.info("Constructing data table.")
data = df.copy()
data = prep_data_avgint(
df=data,
node_df=node_df,
covariate_df=covariate_df
)
data["data_name"] = data.index.astype(str)
data["density_id"] = data["density"].apply(lambda x: DensityEnum[x].value)
data["integrand_id"] = data["measure"].apply(lambda x: IntegrandEnum[x].value)
data["weight_id"] = data["measure"].apply(lambda x: INTEGRAND_TO_WEIGHT[x].value)
data["subgroup_id"] = 0
columns = data.columns
data = data[[
'data_name', 'integrand_id', 'density_id', 'node_id', 'weight_id', 'subgroup_id',
'hold_out', 'meas_value', 'meas_std', 'eta', 'nu',
'age_lower', 'age_upper', 'time_lower', 'time_upper'
] + [x for x in columns if x.startswith('x_')]]
data = data.loc[(data.time_lower >= times.min()) & (data.time_upper <= times.max())].copy()
data = data.loc[(data.age_lower >= ages.min()) & (data.age_upper <= ages.max())].copy()
return data
def construct_gbd_avgint_table(df: pd.DataFrame,
node_df: pd.DataFrame,
covariate_df: pd.DataFrame,
integrand_df: pd.DataFrame,
ages: np.ndarray,
times: np.ndarray) -> pd.DataFrame:
"""
Constructs the avgint table using the output df
from the inputs.to_avgint() method.
Parameters
----------
df
The data frame to construct the avgint table from, that has things like
ages, times, nodes (locations), sexes, etc.
node_df
dismod node data frame
covariate_df
dismod covariate data frame
integrand_df
dismod integrand data frame
ages
array of ages for the model
times
array of times for the model
"""
LOG.info("Constructing the avgint table.")
avgint = df.copy()
avgint = prep_data_avgint(
df=avgint,
node_df=node_df,
covariate_df=covariate_df
)
    avgint_df = pd.DataFrame()
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime
import random
import sys
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint as sp_randint
from scipy.stats import uniform
from functions import (
under_over_sampler,
classifier_train,
classifier_train_manual,
make_generic_df,
get_xy_from_df,
plot_precision_recall_vs_threshold,
plot_precision_vs_recall,
)
from classification_methods import (
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
)
# stop warnings from sklearn
# https://stackoverflow.com/questions/32612180/eliminating-warnings-from-scikit-learn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# to profile script for memory usage, use:
# /usr/bin/time -f "mem=%K RSS=%M elapsed=%E cpu.sys=%S .user=%U" python random_search_run.py
# from https://unix.stackexchange.com/questions/375889/unix-command-to-tell-how-much-ram-was-used-during-program-runtime
#############################################################################
# RANDOM SEARCH PARAMETERS
# fill these out to set parameters for the random search
# set a seed for the parameter sampler
sampler_seed = random.randint(0, 2 ** 16)
no_iterations = 30000
# create list of tools that we want to look over
# these are only the tools that we know we have wear-failures [57, 54, 32, 36, 22, 8, 2]
# tool_list_all = [57, 54, 32, 36, 22, 8, 2]
tool_list_all = [54]
# tool_list_some = [57, 32, 22, 8, 2, 36]
tool_list_some = []
# other parameters
scaler_methods = ["standard", "min_max"]
imbalance_ratios = [0.1,0.5,0.8,1]
average_across_indices = [True,False]
# list of classifiers to test
classifier_list_all = [
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
]
over_under_sampling_methods = [
"random_over",
"random_under",
"random_under_bootstrap",
"smote",
"adasyn",
None,
]
# no cut indices past 9 that are valid
index_list = [
list(range(0, 10)),
list(range(1, 10)),
list(range(1, 9)),
list(range(1, 8)),
list(range(2, 8)),
list(range(3, 7)),
list(range(2, 9)),
list(range(2, 10)),
]
#############################################################################
# test and train folds
# failures for tool 54 on following dates:
# 2018-11-15
# 2019-01-28
# 2019-01-29
# 2019-01-30
# 2019-02-04
# 2019-02-07
# 2019-02-08
# 2019-09-11 - These are resampled into pickle files (in case that matters)
# 2019-11-27
# 2019-01-23 - These are from January data without speed
# update 8/6/2020: does not look like we use the 'test_fold'
# therefore, I have divided the dates into the other three folds
test_fold = [
"2018-10-23",
"2018-11-15", # failures
"2018-11-16",
"2018-11-19",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_1 = [
"2018-11-21",
"2019-01-25",
"2019-01-28", # failures
"2019-11-27", # failures
"2019-01-23", # failures, from Jan without speed
"2019-05-03",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_2 = [
"2019-01-29", # failures
"2019-01-30", # failures
"2019-02-01",
"2019-02-08", # failures
"2019-09-10",
"2019-09-12",
"2018-11-20",
"2019-02-11",
"2019-01-24", # i forgot this one earlier
"2019-05-04",
"2018-11-16",
"2018-11-19",
]
train_fold_3 = [
"2019-02-04", # failures
"2019-02-05",
"2019-02-07", # failures
"2019-05-06",
"2019-01-22", # from Jan without speed
"2018-10-23",
"2018-11-15", # failures
]
train_folds = [train_fold_1, train_fold_2, train_fold_3]
train_dates_all = [date for sublist in train_folds for date in sublist]
#############################################################################
# start by loading the csv with the features
# file_folder = Path(
# "/home/tim/Documents/Checkfluid-Project/data/processed/"
# "_tables/low_levels_labels_created_2020-03-11"
# )
# for HPC
file_folder = Path(
"/home/tvhahn/projects/def-mechefsk/tvhahn/_tables/low_levels_labels_created_2020-03-11/"
)
file = file_folder / "low_level_labels_created_2020.03.11_v3_updated_2020.08.06.csv"
df = pd.read_csv(file)
# sort the values by date and index so that it is reproducible
df = df.sort_values(by=["unix_date", "tool", "index"])
# replace NaN's in failed columns with 0
df["failed"].fillna(
0, inplace=True, downcast="int"
) # replace NaN in 'failed' col with 0
# function to convert pandas column to datetime format
def convert_to_datetime(cols):
unix_date = cols[0]
value = datetime.fromtimestamp(unix_date)
return value
# apply 'date_ymd' column to dataframe
df["date"] = df[["unix_date"]].apply(convert_to_datetime, axis=1)
# convert to a period, and then string
df["date_ymd"] = pd.to_datetime(df["date"], unit="s").dt.to_period("D").astype(str)
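
# Illustrative sketch (not part of the original script): the two steps above turn a
# unix timestamp column into a 'YYYY-MM-DD' string. A minimal version on invented data:
def _demo_unix_to_ymd():
    toy = pd.DataFrame({"unix_date": [1549011600, 1549098000]})
    toy["date"] = toy["unix_date"].apply(lambda u: datetime.fromtimestamp(u))
    toy["date_ymd"] = pd.to_datetime(toy["date"]).dt.to_period("D").astype(str)
    return toy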
# create train set
df_train = df[df["date_ymd"].isin(train_dates_all)].reset_index(drop=True).copy()
#############################################################################
# build the parameters to search over
# start with building the generic feature list which we will sample from
feat_generic_all = []
for feat in list(df_train.columns):
if "sub" in feat:
feat_generic_all.append(feat.replace("_sub", ""))
else:
pass
# parameter dictionary for random sampler to go over
parameters_sample_dict = {
# "no_tools": sp_randint(0, len(tool_list_some)),
"no_tools": [0],
"no_feat": sp_randint(3, 38), # sp_randint(1, len(feat_generic_all))
"classifier_used": classifier_list_all,
"average_across_index": average_across_indices,
"uo_method": over_under_sampling_methods,
"scaler_method": scaler_methods,
"parameter_sampler_random_int": sp_randint(0, 2 ** 16),
"imbalance_ratio": imbalance_ratios,
# additional parameters to narrow down random search
"index_list": index_list,
}
# generate the list of parameters to sample over
p_list = list(
ParameterSampler(
parameters_sample_dict, n_iter=no_iterations, random_state=sampler_seed
)
)
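
# Illustrative sketch (not part of the original script): ParameterSampler draws
# `n_iter` random combinations from a parameter dictionary like the one above.
# A tiny standalone example with an invented grid:
def _demo_parameter_sampler():
    from scipy.stats import randint as sp_randint
    from sklearn.model_selection import ParameterSampler
    tiny_grid = {"no_feat": sp_randint(3, 10), "scaler_method": ["standard", "min_max"]}
    return list(ParameterSampler(tiny_grid, n_iter=3, random_state=0))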
#############################################################################
# run models with each of the parameters
date_time = datetime.now().strftime("%Y.%m.%d-%H.%M.%S")
for k, p in enumerate(p_list):
# set random.seed
random.seed(p["parameter_sampler_random_int"])
# get specific parameters
clf_name = str(p["classifier_used"]).split(" ")[1]
tool_list = sorted(
random.sample(tool_list_some, p["no_tools"])
+ [54])
# tool_list = sorted(
# [54]
# + random.sample([36], random.randint(0, 1))
# )
feat_list = sorted(random.sample(feat_generic_all, p["no_feat"]))
indices_to_keep = p["index_list"]
to_avg = p["average_across_index"]
uo_method = p["uo_method"]
# if svm, need to prevent too large a dataset, thus will only use undersampling
if clf_name == "svm_classifier":
        uo_method = random.sample(["random_under", "random_under_bootstrap"], 1)[0]  # take the sampled string, not a one-element list
imbalance_ratio = p["imbalance_ratio"]
scaler_method = p["scaler_method"]
parameter_sampler_random_int = p["parameter_sampler_random_int"]
clf_function = p["classifier_used"]
# build dictionary to store parameter results and other info
parameter_values = {
"clf_name": clf_name,
"tool_list": tool_list,
"feat_list": feat_list,
"indices_to_keep": indices_to_keep,
"info_no_samples": None,
"info_no_failures": None,
"info_no_feat": p["no_feat"],
"to_average": to_avg,
"uo_method": uo_method,
"imbalance_ratio": imbalance_ratio,
"scaler_method": scaler_method,
"parameter_sampler_seed": parameter_sampler_random_int,
"initial_script_seed": sampler_seed,
}
# prepare the data table
X_train, y_train, df_ymd_only = get_xy_from_df(
df_train,
tool_list=tool_list,
indices_to_keep=indices_to_keep,
to_average=to_avg,
generic_feat_list=feat_list,
)
# check if empty X_train
len_data = len(y_train)
# check if not enough labels in y_train
no_label_failed = np.sum(y_train)
seed_indexer = 0
while len_data < 20 or no_label_failed < 15:
random.seed(p["parameter_sampler_random_int"] + seed_indexer)
tool_list = sorted(
random.sample(tool_list_some, p["no_tools"])
+ random.sample([54], random.randint(1, 2))
)
X_train, y_train, df_ymd_only = get_xy_from_df(
df_train,
tool_list=tool_list,
indices_to_keep=indices_to_keep,
to_average=to_avg,
generic_feat_list=feat_list,
)
parameter_values["tool_list"] = tool_list
len_data = len(y_train)
no_label_failed = np.sum(y_train)
seed_indexer += 1
parameter_values["info_no_samples"] = len_data
parameter_values["info_no_failures"] = no_label_failed
# save the general parameters values
df_gpam = pd.DataFrame.from_dict(parameter_values, orient="index").T
# instantiate the model
clf, classifier_parameters = clf_function(parameter_sampler_random_int)
# save classifier parameters into dataframe
    df_cpam = pd.DataFrame.from_dict(classifier_parameters, orient="index")
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
        assert_frame_equal(result, expected)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale, LabelEncoder
from sklearn.linear_model import LinearRegression
############################################
# Section 1 - Importing and Combining Data
############################################
np.random.seed(12345)
# Getting company names and sector labels
df = pd.read_csv('D:/ML/book1.csv', header=0, index_col='Ticker')
to_keep = ['Name', 'Sector']
dfA = df[to_keep]
# Getting financial ratio data
dfB = pd.read_csv('D:/ML/ratios.csv', header=0, index_col='Ticker')
ratioNames = np.array(dfB.columns.values)
# Concatenating dataframes to get primary dataset
companyData = dfA.join(dfB).drop(['BF-B', 'CTVA', 'FRC'])
companyData = companyData.fillna(0)
clusterData = np.array(companyData)
companies = np.array(companyData.index)
############################################
# Section 2 - Computing Ranked Measures
############################################
# Storing sector-wise means of ratios
dt = companyData.groupby('Sector').mean()
# Function to get industry-relative ratios
def getRelative(ratioData):
ratios = ratioData[:, 2:]
sector = ratioData[:, 1]
for i in range(len(sector)):
# Get sector of company and sector-wise averages of ratios
ind = sector[i]
indAvgs = dt.loc[ind]
for j in range(len(indAvgs)):
ratios[i, j] = ratios[i, j] / indAvgs[j]
return ratios
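# Illustrative example (hypothetical numbers): if a company's current ratio is 0.30 while its
# sector's average is 0.15, getRelative stores 0.30 / 0.15 = 2.0, i.e. twice the sector norm;
# a value of 1.0 means the company sits exactly at its sector average.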
# Storing the relative ratios for future use
finalData = pd.DataFrame(getRelative(clusterData), index=companies, columns=ratioNames).fillna(0)
####################################################
# Section 3 - Identifying Optimal Number of Clusters
###################################################
# Loading the feature dataset
X = np.array(finalData)
comp = clusterData[:, 1]
# Encoding output labels
lab = LabelEncoder()
labels = lab.fit_transform(comp)
# Algorithm to compare cluster sizes (adapted from Scikit-learn's documentation)
def bench_k_means(classifier, name, data):
# Prints labels of measures used
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
t0 = time()
classifier.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), classifier.inertia_,
metrics.homogeneity_score(labels, classifier.labels_),
metrics.completeness_score(labels, classifier.labels_),
metrics.v_measure_score(labels, classifier.labels_),
metrics.adjusted_rand_score(labels, classifier.labels_),
metrics.adjusted_mutual_info_score(labels, classifier.labels_, average_method='arithmetic'),
metrics.silhouette_score(data, classifier.labels_, metric='euclidean', sample_size=497))
)
return classifier.inertia_
# List to store inertia for Elbow Method of cluster size identification
wcss = []
# Comparing multiple values of k (chose to use 4)
for i in range(2, 12):
print("Calculating measurement scores for Cluster Size {}".format(i))
cluster = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300,
precompute_distances=True, random_state=3)
inert = bench_k_means(classifier=cluster, name = "k-means++", data = X)
print('')
wcss.append(inert)
# Plotting inertia for different values of k to identify 'elbow'
plt.figure(figsize=(10, 10))
plt.plot(range(2, 12), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.xticks(range(1, 12))
plt.show()
# Function to visualise two most populous clusters on two random axes
def plotClusters(kmeans_out, dimA, dimB):
(values, counts) = np.unique(kmeans_out, return_counts=True)
filled = np.stack((values, counts), axis=1)
sortedFill = filled[filled[:, 1].argsort()]
# Pick the last two clusters, i.e. most populous
for i in [-1, -2]:
cID = sortedFill[i][0]
if i == -1:
plt.scatter(X[kmeans_out == cID, dimA], X[kmeans_out == cID, dimB], s=50, c='lightblue',
marker='o', edgecolor='black', label='cluster 1')
else:
plt.scatter(X[kmeans_out == cID, dimA], X[kmeans_out == cID, dimB], s=50, c='lightgreen',
marker='s', edgecolor='black', label='cluster 2')
plt.legend(scatterpoints=1)
plt.grid()
plt.xlabel('Dimension A')
plt.ylabel('Dimension B')
plt.title('Visual Decomposition of Clustering')
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
return sortedFill
# Cluster Size chosen from previous section
size = 7
# Visualising k-means using two random axes
kmeans = KMeans(n_clusters=size, init='random', n_init=20, max_iter=300,
precompute_distances=True, random_state=3)
kmeans_out = kmeans.fit_predict(X)
idlist = plotClusters(kmeans_out, 1, 7)
####################################################
# Section 4 - Using PCA to visualise clusters
###################################################
# Fitting K-means to reduced-form data
pca = PCA(n_components=size-1).fit_transform(X)
cluster = KMeans(init='random', n_clusters=size, n_init=20)
pca_out = cluster.fit_predict(pca)
plotClusters(pca_out, 0, 1)
clusterID = pd.DataFrame(kmeans_out, index=companies, columns=['ClusterID'])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
def data_filling():
train_data = pd.read_csv("./data/train.csv")
test_data = pd.read_csv("./data/test.csv")
train_data = fill_holiday(train_data)
test_data = fill_holiday(test_data)
train_data.to_csv('./processed/train_data_filled.csv', index=False)
test_data.to_csv('./processed/test_data_filled.csv', index=False)
def fill_holiday(data):
for i in range(len(data)):
if data.loc[i, 'holiday'] != 'None':
holiday_list = [data.loc[i, 'holiday']] * len(
data.loc[data['timestamp'].str.contains(data.loc[i, 'timestamp'].split(' ')[0])])
data.loc[data['timestamp'].str.contains(
data.loc[i, 'timestamp'].split(' ')[0]), 'holiday'] = holiday_list
return data
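# Sketch of the behaviour (hypothetical label): if the row stamped '2013-01-01 00:00:00' carries
# holiday == 'New Years Day', every row whose timestamp contains '2013-01-01' is given that same
# label, so the holiday flag applies to the whole day rather than only its first hour.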
def data_processing():
train_data = pd.read_csv("processed/train_data_filled.csv")
test_data = pd.read_csv("processed/test_data_filled.csv")
x_train = train_data.drop('traffic_volume', axis=1)
y_train = train_data['traffic_volume']
x_test = test_data
all_data = pd.concat([x_train, x_test])
all_data['timestamp'] = pd.to_datetime(all_data['timestamp'])
all_data['weekday'] = all_data['timestamp'].dt.weekday
all_data['month'] = all_data['timestamp'].dt.month
# all_data['day_of_month'] = all_data['timestamp'].dt.day
all_data['hour'] = all_data['timestamp'].dt.hour
all_data = all_data.drop(['timestamp'], axis=1)
    holiday_dummy = pd.get_dummies(all_data['holiday'])
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
class TestConcatSort:
def test_concat_sorts_columns(self, sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(self, sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]},
index=["a", "b", "c"],
columns=["a", "b"],
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(self, sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame(
{"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]
)
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with | tm.assert_produces_warning(None) | pandas._testing.assert_produces_warning |
import os.path as osp
import matplotlib.pyplot as plt
# from bokeh.palettes import Category20
from sklearn.manifold import TSNE
import pandas as pd
def tsne(feature_map, results, component_num, dir_path):
# fig, ax = plt.subplots()
# y_pred, y, conf, img_name = results
y_pred, y = results
model_tsne = TSNE(n_components=component_num, random_state=40)
model_tsne.fit(feature_map)
embeddings = model_tsne.embedding_
    df = pd.DataFrame()
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": | Index([0, 2], dtype="int64") | pandas.Index |
import pandas as pd
import numpy as np
from multiprocessing import Pool
import tqdm
import sys
import gzip as gz
from tango.prepare import init_sqlite_taxdb
def translate_taxids_to_names(res_df, reportranks, name_dict):
"""
Takes a pandas dataframe with ranks as columns and contigs as rows and taxids as values and translates taxids
to names column by column using a taxid->name dictionary
Parameters
----------
res_df: pandas.DataFrame
Results with taxids
reportranks: list
List of taxonomic ranks to report results for
name_dict: dictionary
Dictionary mapping taxids -> names
Returns
-------
res: pandas.DataFrame
Dataframe with names instead of taxids
"""
res = {}
for rank in reportranks:
res[rank] = [name_dict[taxid] for taxid in res_df.loc[:,rank]]
res = pd.DataFrame(res)
res.index = res_df.index
res = res.loc[:, reportranks]
return res
def get_thresholds(df, top=10):
"""
    Here bit-score thresholds are calculated per query and returned in a dictionary.
    The DataFrame is first sorted by bitscore (high to low) and grouped by query; the threshold for each query is set
    to (100 - top)% of its best hit's bitscore, and the result is converted to a dictionary.
Parameters
----------
df: pandas.DataFrame
DataFrame of diamond results
top: int
Percentage range of top bitscore
Returns
-------
thresholds: dict
Dictionary with queries as keys and bitscore thresholds as values
"""
thresholds = (df.sort_values("bitscore", ascending=False).groupby(level=0).first().bitscore * (
(100 - top)) / 100).to_dict()
return thresholds
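# Worked example: with top=10, a query whose best hit has bitscore 200 gets a threshold of
# 200 * (100 - 10) / 100 = 180, so hits scoring below 180 are later discarded for that query.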
def get_rank_thresholds(ranks, thresholds):
"""
Constructs dictionary of rank-specific thresholds
Parameters
----------
ranks: list
Taxonomic ranks to assign
thresholds: list
Thresholds for taxonomic ranks
Returns
-------
Dictionary of thresholds
"""
t_len, r_len = len(thresholds), len(ranks)
if t_len != r_len:
sys.exit("ERROR: Number of taxonomic ranks ({}) and number of thresholds ({}) differ\n".format(r_len, t_len))
return dict(zip(ranks, thresholds))
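# Example (illustrative numbers): get_rank_thresholds(["phylum", "genus", "species"], [45, 60, 85])
# returns {"phylum": 45, "genus": 60, "species": 85}; mismatched list lengths abort with sys.exit.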
def add_names(x, taxid, ncbi_taxa):
"""
This function translates taxonomy ids to names. It operates per-row in the lineage dataframe.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ncbi_taxa: ete3.ncbi_taxonomy.ncbiquery.NCBITaxa
The ete3 sqlite database connection
Returns
-------
The original DataFrame merged with the taxa names
"""
# Get a names dictionary for all taxids in the row
names = ncbi_taxa.get_taxid_translator(list(x.loc[taxid].values) + [taxid])
n = {}
# Iterate ranks
for rank in list(x.columns):
# Get taxid for the current rank
t = x.loc[taxid, rank]
# If taxid is negative it means that there is no classified taxonomy at this rank
# Instead we get the last known name in the hierarchy. We can then use the negative values to translate into
# the name with the "Unclassified." prefix.
# If the name is 'root' we just use 'Unclassified'
if t < 0:
known_name = names[-t]
if known_name == "root":
name = "Unclassified"
else:
name = known_name
# If taxid is positive we just use the name from the dictionary
else:
name = names[t]
# Add name to a dictionary with keys in the form of {rank}.name
n["{}.name".format(rank)] = name
name_df = pd.DataFrame(n, index=[taxid])
return pd.merge(x, name_df, left_index=True, right_index=True)
def propagate_lower(x, taxid, ranks):
"""
Shift known ranks down through the taxonomic hierarchy.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ranks: list
Ranks used for assigning
Returns
-------
pandas.DataFrame updated with missing ranks
Some proteins in the database may map to a taxonomic rank above the lowest taxonomic rank that we are trying to
assign. For instance, if we use the ranks 'superkingdom phylum genus species' and a protein maps to a taxid at
rank phylum then we want to add the taxonomic information at the genus and species levels. This is done here by
adding the negative taxid of the lowest known rank to the lower ranks.
Example:
In the Uniref90 database the entry 'E1GVX1' maps to taxonomy id 838 (rank: genus, name: Prevotella).
When creating the lineage for taxid 838 we add '-838' to rank species.
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
missing = {}
known = taxid
for rank in rev_ranks[0:]:
if rank not in x.columns:
missing[rank] = -known
else:
known = x.loc[taxid, rank]
return pd.merge(x, pd.DataFrame(missing, index=[taxid]), left_index=True, right_index=True)
def get_lca(r, assignranks, reportranks):
"""
Assign lowest common ancestor from a set of taxids.
Parameters
----------
r: pandas.DataFrame
Results for a single query, extracted from the main diamond results file
assignranks: list
Taxonomic ranks to assign taxonomy for
reportranks: list
Taxonomic ranks to report taxonomy for
Returns
-------
a tuple of dictionaries with ranks as keys and taxa names/ids as values
This function takes a query-slice of the diamond results after filtering by score (and rank-threshold if tango mode
    is 'rank_lca' or 'rank_vote'). It then iterates through each rank in reverse order and checks how many unique
    taxids are found at that rank. If only one taxid remains at a rank, its lineage is returned as the lowest common ancestor.
"""
query = r.index.unique()[0]
# Reverse ranks for iterating
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
# Iterate through the assignranks
for rank in rev_ranks:
higher_ranks = reportranks[0:reportranks.index(rank) + 1]
higher_rank_names = ["{}.name".format(x) for x in higher_ranks]
# Count number of taxa at rank
c = r.groupby(rank).count()
# If there's only one taxa then we have found the LCA
if len(c) == 1:
if len(r) == 1:
lca_taxids = r.loc[query, higher_ranks].values
else:
lca_taxids = r.loc[query, higher_ranks].values[0]
return dict(zip(higher_ranks, lca_taxids))
return {}
def parse_with_rank_thresholds(r, assignranks, reportranks, rank_thresholds, mode, vote_threshold):
"""Assigns taxonomy using rank_specific thresholds
The ranks used to assign taxonomy are iterated in reverse (e.g. species, genus, phylum),
at each rank results are filtered by the corresponding rank threshold,
if no hits remain after filtering the next rank is evaluated,
Then, if mode=='rank_lca', for remaining hits, a lowest common ancestor is calculated from all remaining taxids.
However, if mode=='rank_vote', taxids are counted among the remaining hits and all results matching taxids
that occur more than vote_threshold are used to determine the lowest common ancestor.
If a taxonomy can be assigned at a rank, it is returned directly. If no taxonomy can be assigned at any of the
ranks, empty results are returned.
Parameters
----------
r: pandas.DataFrame
Dataframe slice for a query
assignranks: list
Taxonomic ranks used to assign taxonomy
reportranks: list
Taxonomic ranks at which taxonomy is reported
rank_thresholds: dict
Dictionary of rank_specific thresholds
mode: str
'rank_lca' or 'rank_vote'
vote_threshold: float
Cutoff used to filter out common taxids
Returns
-------
tuple
Dictionaries with taxonomy names and taxonomy ids at each rank
"""
# Start from lowest rank
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
for rank in rev_ranks:
# Make sure that LCA is not set below current rank
allowed_ranks = assignranks[0:assignranks.index(rank) + 1]
# Get rank threshold
threshold = rank_thresholds[rank]
# Filter results by rank threshold
try:
_r = r.loc[r.pident >= threshold]
except KeyError:
continue
if len(_r) == 0:
continue
lca_taxids = {}
# After filtering, either calculate lca from all filtered taxids
if mode == "rank_lca":
lca_taxids = get_lca(_r, allowed_ranks, reportranks)
# Or at each rank, get most common taxid
elif mode == "rank_vote":
vote = get_rank_vote(_r, rank, vote_threshold)
if len(vote) > 0:
lca_taxids = get_lca(vote, allowed_ranks, reportranks)
if len(lca_taxids.keys()) > 0:
return lca_taxids
return {}
def get_rank_vote(r, rank, vote_threshold=0.5):
"""
Filter results based on fraction of taxa
Parameters
----------
r: pandas.DataFrame
Results for a single query, after filtering with bitscore and rank-specific thresholds
rank: str
Current rank being investigated
vote_threshold: float
Required fraction of hits from a single taxa in order to keep taxa
Returns
-------
Filtered dataframe only containing taxa that meet vote_threshold
Here taxa are counted among all hits remaining for a query after filtering using bitscore and rank-specific
thresholds. Taxa are counted at a certain rank and counts are normalized. Hits belonging to taxa above
vote_threshold are kept while others are filtered out.
"""
# Create dataframe for unique taxids filtered at this rank threshold
taxid_counts = pd.DataFrame(dict.fromkeys(r.staxids.unique(), 1), index=["count"]).T
# Add taxid for rank being investigated
rank_df = r.groupby("staxids").first().reset_index()[[rank, "staxids"]].set_index("staxids")
rank_df = pd.merge(taxid_counts, rank_df, left_index=True, right_index=True)
# Sum counts for current rank
rank_sum = rank_df.groupby(rank).sum()
rank_norm = rank_sum.div(rank_sum.sum())
rank_norm = rank_norm.sort_values("count", ascending=False)
votes = rank_norm.loc[rank_norm["count"] > vote_threshold]
if len(votes) > 0:
return r.loc[r[rank].isin(votes.index)]
return []
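# Worked example: if the remaining hits cover 10 unique taxids and 7 of them belong to genus A
# (normalized count 0.7 > vote_threshold 0.5), every hit whose genus is A is kept; with a 5/5
# split no taxon exceeds 0.5 and an empty list is returned.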
def propagate_taxids(res, ranks):
"""
Transfer taxonomy ids to unassigned ranks based on best known taxonomy
Example:
{'species': -1, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
should become
{'species': -171549, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
Parameters
----------
res: dict
Dictionary of ranks and taxonomy ids
ranks: list
Ranks to assign taxonomy to
Returns
-------
res: dict
Dictionary with updated taxonomy ids
"""
known = -1
for rank in ranks:
# If not -1 (Unclassified) at rank, store assignment as known
if res[rank] != -1:
known = res[rank]
continue
# If -1 at rank (Unclassified), add the taxid with the '-' prefix
if res[rank] == -1:
res[rank] = -abs(known)
return res
def series2df(df):
"""Converts pandas series to pandas dataframe"""
if str(type(df)) == "<class 'pandas.core.series.Series'>":
df = pd.DataFrame(df).T
return df
def read_taxidmap(f, ids):
"""
Reads the protein to taxid map file and stores mappings
Parameters
----------
f: str
Input file with protein_id->taxid map
ids: list
Protein ids to store taxids for
Returns
-------
Dictionary of protein ids to taxid and all unique taxids
"""
taxidmap = dict.fromkeys(ids, -1)
open_function = open
if ".gz" in f:
open_function = gz.open
with open_function(f, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading idmap {}".format(f), ncols=100, unit=" lines"):
items = (line.rstrip()).rsplit()
# If file has only two columns, assume taxid in second
if len(items) == 2:
protid, taxid = items
# Otherwise, assume format is same as NCBI protein mapping
else:
protid, taxid = items[0], items[2]
# Add map to dictionary
            # The dictionary was initialized with -1, so adding taxid + 1 stores the correct taxid
# If the protid is not in the dictionary we skip it
try:
taxidmap[protid] += int(taxid) + 1
except KeyError:
continue
except ValueError:
continue
    return pd.DataFrame(taxidmap, index=["staxids"])
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.metrics import confusion_matrix
import sys
from scipy.spatial.distance import euclidean as euc
from os import walk
import tensorflow as tf
import os
maxClusterSize = 100
def getDataLen(trainingDict):
n = 0
for w in trainingDict:
# print(w)
n += trainingDict[w]
print('Total number of data points after this round: ', n)
return n
def assignWeights(trainingDf, trainingDict):
n = getDataLen(trainingDict)
trainingDf['Weightage'] = trainingDf['DataSize'].apply(lambda x: x/n)
return trainingDf, n
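# Worked example (illustrative sizes): with trainingDict = {'clientA.h5': 300, 'clientB.h5': 700},
# n = 1000 and the Weightage column becomes 0.3 and 0.7 - the FedAvg scaling factors later applied
# to each client's weights in getScaledWeight before they are summed in avgWeights.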
def scale(weight, scaler):
scaledWeights = []
for i in range(len(weight)):
scaledWeights.append(scaler * weight[i])
return scaledWeights
def getWeight(d):
#creating sequential model
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=30))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
# model.summary()
fpath = "./merge/"+d
model.load_weights(fpath)
weight = model.get_weights()
return weight
def getScaledWeight(d, scaler):
#creating sequential model
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=30))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
# model.summary()
fpath = "./merge/"+d
model.load_weights(fpath)
weight = model.get_weights()
return scale(weight, scaler)
def avgWeights(scaledWeights):
avg = list()
for weight_list_tuple in zip(*scaledWeights):
layer_mean = tf.math.reduce_sum(weight_list_tuple, axis=0)
avg.append(layer_mean)
return avg
def FedAvg(trainingDict):
trainingDf = pd.DataFrame.from_dict(trainingDict, orient='index', columns=['DataSize'])
models = list(trainingDict.keys())
scaledWeights = []
trainingDf, dataLen = assignWeights(trainingDf, trainingDict)
for m in models:
scaledWeights.append(getScaledWeight(m, trainingDf.loc[m]['Weightage']))
fedAvgWeight = avgWeights(scaledWeights)
return fedAvgWeight, dataLen
def saveModel(weight):
#creating sequential model
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=30))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
model.set_weights(weight)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#Saving Model
fpath = str("./mergeG/newG.h5")
model.save(fpath)
def euclidean(m, n):
distance = []
for i in range(len(m)):
# print(i)
distance.append(euc(m[i].reshape(-1,1), n[i].reshape(-1,1)))
# print(distance)
distance = sum(distance)/len(m)
return distance
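# Note: this returns the mean of the per-layer Euclidean distances between two lists of weight
# arrays, which gives a single scalar for comparing how far apart two models are.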
def merge(trainingDict, b):
# print(trainingDict)
models = list(trainingDict.keys())
# print(models)
    trainingDf = pd.DataFrame.from_dict(trainingDict, orient='index', columns=['DataSize'])
import pandas as pd
import numpy as np
from ..siu import create_sym_call, Symbolic
from functools import singledispatch
# TODO: move into siu
def register_symbolic(f):
@f.register(Symbolic)
def _dispatch_symbol(__data, *args, **kwargs):
return create_sym_call(f, __data.source, *args, **kwargs)
return f
# fct_reorder -----------------------------------------------------------------
@register_symbolic
@singledispatch
def fct_reorder(fct, x, func = np.median):
x_vals = x.values if isinstance(x, pd.Series) else x
s = pd.Series(x_vals, index = fct)
# for each cat, calc agg func, make values of ordered the codes
ordered = s.groupby(level = 0).agg(func).sort_values()
ordered[:] = np.arange(len(ordered))
codes = ordered[s.index.values]
return pd.Categorical.from_codes(codes, list(ordered.index))
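# Usage sketch (hypothetical data): fct_reorder(["a", "a", "b"], [5, 1, 2]) orders the categories by
# the per-level median of x (a -> 3, b -> 2), returning a Categorical with categories ['b', 'a'].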
# fct_recode ------------------------------------------------------------------
@register_symbolic
@singledispatch
def fct_recode(fct, **kwargs):
if not isinstance(fct, pd.Categorical):
fct = pd.Categorical(fct)
rev_kwargs = {v:k for k,v in kwargs.items()}
return fct.rename_categories(rev_kwargs)
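# Usage sketch: fct_recode(pd.Categorical(["a", "b", "a"]), x="a") renames category "a" to "x";
# kwargs are given as new_name=old_name and reversed into the old->new mapping rename_categories expects.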
# fct_collapse ----------------------------------------------------------------
@register_symbolic
@singledispatch
def fct_collapse(fct, recat, group_other = None):
if not isinstance(fct, pd.Categorical):
fct = pd.Categorical(fct)
# each existing cat will map to a new one ----
# need to know existing to new cat
# need to know new cat to new code
cat_to_new = {k: None for k in fct.categories}
new_cat_set = {k: True for k in fct.categories}
for new_name, v in recat.items():
v = [v] if not np.ndim(v) else v
for old_name in v:
if cat_to_new[old_name] is not None:
raise Exception("category %s was already re-assigned"%old_name)
cat_to_new[old_name] = new_name
del new_cat_set[old_name]
new_cat_set[new_name] = True # add new cat
# collapse all unspecified cats to group_other if specified ----
for k, v in cat_to_new.items():
if v is None:
if group_other is not None:
new_cat_set[group_other] = True
cat_to_new[k] = group_other
del new_cat_set[k]
else:
cat_to_new[k] = k
# map from old cat to new code ----
# calculate new codes
new_cat_set = {k: ii for ii, k in enumerate(new_cat_set)}
# map old cats to them
remap_code = {old: new_cat_set[new] for old, new in cat_to_new.items()}
new_codes = fct.map(remap_code)
new_cats = list(new_cat_set.keys())
    return pd.Categorical.from_codes(new_codes, new_cats)
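# Usage sketch: fct_collapse(["a", "b", "c"], {"ab": ["a", "b"]}, group_other="other") maps "a" and
# "b" onto a new "ab" category and collapses the remaining category "c" into "other".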
from datetime import datetime
import numpy as np
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
period_range,
to_datetime,
)
import pandas._testing as tm
def test_multiindex_period_datetime():
# GH4861, using datetime in period of multiindex raises exception
idx1 = Index(["a", "a", "a", "b", "b"])
idx2 = period_range("2012-01", periods=len(idx1), freq="M")
s = Series(np.random.randn(len(idx1)), [idx1, idx2])
# try Period as index
expected = s.iloc[0]
result = s.loc["a", Period("2012-01")]
assert result == expected
# try datetime as index
result = s.loc["a", datetime(2012, 1, 1)]
assert result == expected
def test_multiindex_datetime_columns():
# GH35015, using datetime as column indices raises exception
mi = MultiIndex.from_tuples(
[(to_datetime("02/29/2020"), to_datetime("03/01/2020"))], names=["a", "b"]
)
df = DataFrame([], columns=mi)
expected_df = DataFrame(
[],
columns=MultiIndex.from_arrays(
[[to_datetime("02/29/2020")], [ | to_datetime("03/01/2020") | pandas.to_datetime |
#%%
path = '../../dataAndModel/data/o2o/'
import os, sys, pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
from sklearn.linear_model import SGDClassifier, LogisticRegression
dfoff = pd.read_csv(path+'ccf_offline_stage1_train.csv')
dftest = pd.read_csv(path+'ccf_offline_stage1_test_revised.csv')
dfon = pd.read_csv(path+'ccf_online_stage1_train.csv')
print('data read end.')
print(dfoff['Discount_rate'])
#%%
dfoff['distance'] = dfoff['Distance'].fillna(-1).astype(int)
print(dfoff['Distance'])
print(dfoff['distance'])
#%%
print(dfoff['distance'][13])
print(dfoff['Distance'][13])
#%%
# %%
# 1. Convert 'spend xx, save yy' coupons of the form `xx:yy` into a discount rate: `1 - yy/xx`, and build the coupon-related features `discount_rate, discount_man, discount_jian, discount_type`
# 2. Convert the distance column from `str` to `int`
# convert Discount_rate and Distance
def getDiscountType(row):
if pd.isnull(row):
return np.nan
elif ':' in row:
return 1
else:
return 0
def convertRate(row):
"""Convert discount to rate"""
if pd.isnull(row):
return 1.0
elif ':' in str(row):
rows = row.split(':')
return 1.0 - float(rows[1])/float(rows[0])
else:
return float(row)
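# Worked example: a '150:20' coupon (spend 150, save 20) becomes a discount rate of
# 1 - 20/150 ≈ 0.867, a plain '0.9' coupon stays 0.9, and a missing value maps to 1.0.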
def getDiscountMan(row):
if ':' in str(row):
rows = row.split(':')
return int(rows[0])
else:
return 0
def getDiscountJian(row):
if ':' in str(row):
rows = row.split(':')
return int(rows[1])
else:
return 0
#%%
def processData(df):
# convert discunt_rate
df['discount_rate'] = df['Discount_rate'].apply(convertRate)
df['discount_man'] = df['Discount_rate'].apply(getDiscountMan)
df['discount_jian'] = df['Discount_rate'].apply(getDiscountJian)
df['discount_type'] = df['Discount_rate'].apply(getDiscountType)
print(df['discount_rate'].unique())
# convert distance
df['distance'] = df['Distance'].fillna(-1).astype(int)
return df
dfoff = processData(dfoff)
# dftest = processData(dftest)
#%%
date_received = dfoff['Date_received'].unique()
date_received = sorted(date_received[pd.notnull(date_received)])
date_buy = dfoff['Date'].unique()
date_buy = sorted(date_buy[pd.notnull(date_buy)])
date_buy = sorted(dfoff[dfoff['Date'].notnull()]['Date'])
couponbydate = dfoff[dfoff['Date_received'].notnull()][['Date_received', 'Date']].groupby(['Date_received'], as_index=False).count()
couponbydate.columns = ['Date_received','count']
buybydate = dfoff[(dfoff['Date'].notnull()) & (dfoff['Date_received'].notnull())][['Date_received', 'Date']].groupby(['Date_received'], as_index=False).count()
buybydate.columns = ['Date_received','count']
print("end")
#%%
def getWeekday(row):
if row == 'nan':
return np.nan
else:
return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1
dfoff['weekday'] = dfoff['Date_received'].astype(str).apply(getWeekday)
dftest['weekday'] = dftest['Date_received'].astype(str).apply(getWeekday)
# weekday_type: 1 for Saturday and Sunday, 0 otherwise
dfoff['weekday_type'] = dfoff['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )
dftest['weekday_type'] = dftest['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )
# change weekday to one-hot encoding
weekdaycols = ['weekday_' + str(i) for i in range(1,8)]
tmpdf = pd.get_dummies(dfoff['weekday'].replace('nan', np.nan))
print(tmpdf)
tmpdf.columns = weekdaycols
dfoff[weekdaycols] = tmpdf
print(dfoff)
tmpdf = pd.get_dummies(dftest['weekday'].replace('nan', np.nan))
tmpdf.columns = weekdaycols
dftest[weekdaycols] = tmpdf
def label(row):
if pd.isnull(row['Date_received']):
return -1
if pd.notnull(row['Date']):
        td = pd.to_datetime(row['Date'], format='%Y%m%d') - pd.to_datetime(row['Date_received'], format='%Y%m%d')
import os
import random
from io import BytesIO
from tempfile import TemporaryDirectory
import tensorflow as tf
from PIL import Image
from google.cloud import storage
import numpy as np
import glob
from tqdm import tqdm
import h5py
import json
from data.thor_constants import THOR_AFFORDANCES, THOR_OBJECT_TYPES, THOR_ACTIONS, _action_to_type_ind, \
_object_to_type_ind, _object_to_statechange_df, _fixup_df, THOR_ACTION_TYPE_TO_IND
from typing import List
import pandas as pd
class S3TFRecordWriter(object):
def __init__(self, fn, buffer_size=10000):
"""
Upload to gcloud
:param fn:
        :param buffer_size: Trying to space out identical things here by shuffling a buffer
p(first lasts until the end,N) = (1-pflush) ^ (N/(p*buffer_size))
each flush event removes buffer_size*p
If the buffer size is big enough then we have good randomness I think
"""
self.fn = fn
if fn.startswith('gs://'):
self.gclient = storage.Client()
self.storage_dir = TemporaryDirectory()
self.writer = tf.io.TFRecordWriter(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
self.bucket_name, self.file_name = self.fn.split('gs://', 1)[1].split('/', 1)
else:
self.gclient = None
self.bucket_name = None
self.file_name = None
self.storage_dir = None
self.writer = tf.io.TFRecordWriter(fn)
self.buffer_size = buffer_size
self.buffer = []
self.num_written = 0
def write(self, x):
self.num_written += 1
if self.buffer_size < 10:
self.writer.write(x)
return
if len(self.buffer) < self.buffer_size:
self.buffer.append(x)
else:
random.shuffle(self.buffer)
            for i in range(self.buffer_size // 5):  # Pop 20% of the buffer
                self.writer.write(self.buffer.pop())
            self.buffer.append(x)  # keep the current example; otherwise it would be silently dropped
def close(self):
# Flush buffer
for x in self.buffer:
self.writer.write(x)
self.writer.close()
if self.gclient is not None:
print(f"UPLOADING {self.num_written}ex!!!!!", flush=True)
bucket = self.gclient.get_bucket(self.bucket_name)
blob = bucket.blob(self.file_name)
blob.upload_from_filename(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
self.storage_dir.cleanup()
def __enter__(self):
# Called when entering "with" context.
return self
def __exit__(self, *_):
# Called when exiting "with" context.
# Upload shit
print("CALLING CLOSE")
self.close()
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _convert_image_to_jpgstring(image):
"""
:param image: Numpy array of an image [H, W, 3]
:return: it, as a jpg string
"""
with BytesIO() as output:
image_pil = Image.fromarray(image, mode='RGB')
image_pil.save(output, format='JPEG', quality=95)
return output.getvalue()
def _convert_image_seq_to_jpgstring(image):
"""
:param image: Numpy array of an image [N, H, W, 3]
:return: it, as a jpg string
"""
with BytesIO() as output:
image_pil = Image.fromarray(image.reshape((image.shape[0] * image.shape[1], image.shape[2], 3)), mode='RGB')
image_pil.save(output, format='JPEG', quality=95)
return output.getvalue()
def _print_padding_tradeoff(lens, ps=(80, 85, 90, 95, 99, 100,)):
"""
    Given the lengths of everything, print out how much we lose by cutting it off to a shorter percentile
:param lens: Lengths
:param ps: Percentiles
:return:
"""
lens_array = np.array(lens)
for p in ps:
lensp = np.percentile(lens_array, p)
lensused = np.minimum(lens_array, lensp).sum()
lenstotal = np.sum(lens_array)
wasted_space = np.sum(lensp - np.minimum(lens_array, lensp)) / (lensp * len(lens_array))
print(
"Lens {}%: {:.3f}. Using that as seqlength, we use {} frames of {} ({:.3f}), wasted space {:.3f}".format(
p, np.percentile(lens_array, p), lensused, lenstotal, lensused / lenstotal, wasted_space),
flush=True)
#############################
def traj_dataloader(all_fns, include_frames=False):
"""
:param all_fns: list of all filenames to use
    :param include_frames: Whether to include the image frames
:return:
"""
for fn in tqdm(all_fns):
try:
h5reader = h5py.File(fn, 'r')
# Process it
item = {}
for k in ['meta_info', 'alias_object_id_to_old_object_id', 'object_id_to_states', 'output_action_results',
'output_actions']:
item[k] = json.loads(h5reader[k][()].decode('utf-8'))
item['object_ids'] = [x.decode('utf-8') for x in h5reader['object_ids'][()].tolist()]
for k, k_v in h5reader['pos3d'].items():
for t, v in k_v.items():
item['object_id_to_states'][k][t]['pos3d'] = v[()]
# bboxes
bbox_keys = sorted([int(k) for k in h5reader['bboxes'].keys()])
item['bboxes'] = [h5reader['bboxes'][(str(k))][()] for k in bbox_keys]
if not all([x.dtype == np.uint16 for x in item['bboxes']]): # Previously I had a clipping bug
raise ValueError("dtype")
if include_frames:
item['frames'] = h5reader['frames'][()]
item['agent_states'] = h5reader['agent_states'][()]
item['meta_info']['fn'] = fn
yield item
except Exception as e:
print("Error with {}: {}".format(fn, str(e)), flush=True)
def traj_dataloader_v2(all_fns, IMAGE_WIDTH=640, IMAGE_HEIGHT=384):
for item in traj_dataloader(all_fns, include_frames=True):
num_frames = item['frames'].shape[0]
main_object_ids, object_id_to_main_ind = _get_main_object_id_mappings(item['meta_info']['main_object_ids'],
all_object_ids=item['object_ids'],
output_actions=item['output_actions'],
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
# boxes - use an extra ind that tells us what frame we're on
# [img_id, obj_id, x1, y1, x2, y2].
bboxes_list = [_convert_bboxes(v, t, object_ids=item['object_ids'],
image_width=IMAGE_WIDTH,
image_height=IMAGE_HEIGHT,
) for t, v in enumerate(item['bboxes']) if v.size > 0]
bboxes_df = pd.concat([x for x in bboxes_list if x.size > 0], 0)
bboxes_df['main_inds'] = bboxes_df['object_ids'].apply(lambda x: object_id_to_main_ind[x])
# SORT bboxes_df by first, frame number, then, whether it's a main ind or not, and third the size
item['bboxes_df'] = bboxes_df.sort_values(by=['frame', 'main_inds', 'size'], ascending=[True, False, False],
ignore_index=True)
item['output_actions'].append({'action': 'Done'})
item['output_action_results'].append({'action_success': True, 'action_err_msg': ''})
# Predict next action maybe
item['actions'] = pd.DataFrame([_convert_action(x, main_object_ids=main_object_ids,
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
for x in item['output_actions']])
del item['output_actions']
df_mapping = {}
# Compute object -> size and also get a dynamic mapping of the states over time
object_to_size = {}
for k, sz in item['object_id_to_states'].items():
for s in sz.values():
size = np.prod(s['pos3d'][-1] + 1e-8)
object_to_size[k] = max(size, object_to_size.get(k, 0.0))
for oid in main_object_ids:
oid_list = [oid] + [aid for aid, oid2 in item['alias_object_id_to_old_object_id'].items() if oid2 == oid]
df_mapping[oid] = _object_to_statechange_df([item['object_id_to_states'][k] for k in oid_list],
num_frames=item['frames'].shape[0],
object_to_size=object_to_size)
item['df_mapping'] = df_mapping
item['main_object_ids'] = main_object_ids
item['object_id_to_main_ind'] = object_id_to_main_ind
yield item
def _convert_bboxes(bboxes_t, t, object_ids, image_width, image_height):
"""
Converts bboxes into tensorflow format
:param bboxes_t: [N boxes, [obj_id, x1, y1, x2, y2]]
:param t: Int
:param object_ids: Mapping obj_id -> string
:param image_width:
:param image_height:
:return:
"""
# Convert to tf format
bbox_info_float = bboxes_t.astype(np.float32)[:, 1:5] / \
np.array([image_width, image_height, image_width, image_height], dtype=np.float32)[None]
sizes = np.sqrt((bbox_info_float[:, 2] - bbox_info_float[:, 0]) * (bbox_info_float[:, 3] - bbox_info_float[:, 1]))
# Get rid of really small objects
big_enough = sizes > np.sqrt(4.0 / (image_height * image_width))
bbox_info_float = bbox_info_float[big_enough]
bboxes_t = bboxes_t[big_enough]
sizes = sizes[big_enough]
df = pd.DataFrame(np.column_stack([bbox_info_float, sizes]), columns=['xmin', 'ymin', 'xmax', 'ymax', 'size'])
df['frame'] = t
df['object_ids'] = [object_ids[i] for i in bboxes_t[:, 0]]
df['category_ids'] = df['object_ids'].apply(_object_to_type_ind)
return df
def _convert_action(action, main_object_ids: List[str], alias_object_id_to_old_object_id):
"""
Convert an action into something referring to the main obj ids (dealing with aliases and stuff)
:param action:
:param main_object_ids:
:param alias_object_id_to_old_object_id:
:return:
"""
results = {'action_id': _action_to_type_ind(action)}
oid_to_ind = {oid: i for i, oid in enumerate(main_object_ids)}
for alias_object_id, old_object_id in alias_object_id_to_old_object_id.items():
oid_to_ind[alias_object_id] = oid_to_ind[old_object_id]
if 'objectId' in action:
results['object_id'] = oid_to_ind[action['objectId']]
else:
results['object_id'] = -1
if 'receptacleObjectId' in action:
results['receptacle_object_id'] = oid_to_ind[action['receptacleObjectId']]
else:
results['receptacle_object_id'] = -1
return results
def _get_main_object_id_mappings(main_object_ids, all_object_ids,
output_actions, alias_object_id_to_old_object_id):
"""
Return a list of main object IDs, and a mapping from all object Ids to the main ones
:param main_object_ids: Main ids identified by the sampler
:param all_object_ids: All object IDs ever seen
:param output_actions: All output actions -- we might need to add more main object IDs if needed
:param alias_object_id_to_old_object_id: Aliases - e.g. if we chop somethign it changes ID. ugh
:return: new list of main object IDs, and a mapping of objectId to main ind (or 0 otherwise). Starts at 1.
"""
# Create a mapping of objectId -> mainObjectId ind (or nothing!)
# Tack on enough things to main object ids if they're referenced
if isinstance(main_object_ids, str): # Not sure what's going on here
main_object_ids = [main_object_ids]
ref_oids = set([v for a in output_actions for k, v in a.items() if k.endswith('bjectId')])
for roid in sorted(ref_oids):
if roid not in sorted(alias_object_id_to_old_object_id.keys()) + main_object_ids:
main_object_ids.append(roid)
# print("{} objects: {}".format(len(main_object_ids), main_object_ids), flush=True)
object_id_to_main_ind = {oid: -1 for oid in all_object_ids}
for i, mi in enumerate(main_object_ids):
object_id_to_main_ind[mi] = i
for k, v in alias_object_id_to_old_object_id.items():
if v == mi:
object_id_to_main_ind[k] = i
return main_object_ids, object_id_to_main_ind
def traj_dataloader_v3(all_fns, IMAGE_WIDTH=640, IMAGE_HEIGHT=384, include_frames=False):
for item in traj_dataloader(all_fns, include_frames=include_frames):
main_object_ids, object_id_to_main_ind = _get_main_object_id_mappings(item['meta_info']['main_object_ids'],
all_object_ids=item['object_ids'],
output_actions=item['output_actions'],
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
# boxes - use an extra ind that tells us what frame we're on
# [img_id, obj_id, x1, y1, x2, y2].
bboxes_list = [_convert_bboxes(v, t, object_ids=item['object_ids'],
image_width=IMAGE_WIDTH,
image_height=IMAGE_HEIGHT,
) for t, v in enumerate(item['bboxes']) if v.size > 0]
bboxes_df = pd.concat([x for x in bboxes_list if x.size > 0], 0)
del item['bboxes']
bboxes_df['main_inds'] = bboxes_df['object_ids'].apply(lambda x: object_id_to_main_ind[x])
# SORT bboxes_df by first, frame number, then, whether it's a main ind or not, and third the size
item['bboxes_df'] = bboxes_df.sort_values(by=['frame', 'main_inds', 'size'], ascending=[True, False, False],
ignore_index=True)
item['output_actions'].append({'action': 'Done'})
item['output_action_results'].append({'action_success': True, 'action_err_msg': ''})
item['num_frames'] = len(item['output_actions'])
# Predict next action maybe
item['actions'] = pd.DataFrame([_convert_action(x, main_object_ids=main_object_ids,
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
for x in item['output_actions']])
item['actions']['succeeds'] = [x['action_success'] for x in item['output_action_results']]
item['actions']['err_msg'] = [x['action_err_msg'] for x in item['output_action_results']]
del item['output_action_results']
item['actions']['action_name'] = item['actions']['action_id'].apply(lambda x: THOR_ACTIONS[x - 1])
item['actions']['object_name'] = item['actions']['object_id'].apply(
lambda x: main_object_ids[x] if x >= 0 else None)
item['actions']['receptacle_name'] = item['actions']['receptacle_object_id'].apply(
lambda x: main_object_ids[x] if x >= 0 else None)
bad_cols = ['canChangeTempToHot', 'canChangeTempToCold', 'salientMaterials_None']
df_mapping = {}
# Compute object -> size and also get a dynamic mapping of the states over time
object_to_size = {}
for k, sz in item['object_id_to_states'].items():
for s in sz.values():
size = np.prod(s['pos3d'][-1] + 1e-8)
object_to_size[k] = max(size, object_to_size.get(k, 0.0))
for oid in main_object_ids:
oid_list = [oid] + [aid for aid, oid2 in item['alias_object_id_to_old_object_id'].items() if oid2 == oid]
df_mapping[oid] = _object_to_statechange_df([item['object_id_to_states'][k] for k in oid_list],
num_frames=item['num_frames'],
object_to_size=object_to_size,
include_pos=True,
agent_states=item['agent_states'])
# FIX BUGS
object_name = oid.split('|')[0]
# Coffee machines
if object_name in ('CoffeeMachine', 'StoveBurner'):
assert not df_mapping[oid]['isBroken'].any()
df_mapping[oid]['breakable'] = False
# These things haven't been changing state to 'slicd'
if object_name.startswith(('Potato', 'Tomato', 'Apple', 'Lettuce', 'Bread', 'Egg')):
df_mapping[oid]['isSliced'] |= (~df_mapping[oid]['sliceable'])
df_mapping[oid].loc[:, 'cookable'] = True
elif df_mapping[oid]['salientMaterials_Food'].any():
import ipdb
ipdb.set_trace()
elif len(df_mapping[oid]['sliceable'].value_counts()) > 1:
import ipdb
ipdb.set_trace()
# some objects should ALWAYS be at room temperature
if object_name in ('CounterTop',):
df_mapping[oid]['ObjectTemperature'] = 1
# Objects never cool down UNLESS they are put in a fridge
# temp_lowered = [False] + (df_mapping[oid]['ObjectTemperature'].values[1:] < df_mapping[oid]['ObjectTemperature'].values[:-1]).tolist()
# if any(temp_lowered):
# import ipdb
# ipdb.set_trace()
# Don't change these
# Some bugs with things not getting cooked in the microwave (or even getting placed there?)
microwave_rows = item['actions'].apply(
lambda row: (row['action_name'] == 'PutObject') and row['receptacle_name'].startswith('Microwave') and (
row['object_name'] == oid) and row['succeeds'], axis=1)
if microwave_rows.any():
inds = df_mapping[oid].index > np.min(np.where(microwave_rows)[0])
if df_mapping[oid]['cookable'].any():
df_mapping[oid].loc[inds, 'isCooked'] = True
df_mapping[oid].loc[inds, 'ObjectTemperature'] = 2
# Kill these columns
df_mapping[oid] = df_mapping[oid][[c for c in df_mapping[oid].columns
if c not in bad_cols]]
############################
# fix stoveburner nonsense
if item['actions'].apply(lambda row: (row['action_name'] == 'ToggleObjectOn') and (
row['object_name'].split('|')[0] == 'StoveKnob'), axis=1).any():
# First FAIL FAST if not enough important objects
needed = ['StoveBurner', 'StoveKnob']
if not all([n in ' '.join(main_object_ids) for n in needed]) or len(main_object_ids) < 4:
continue
# Create a new actions df
actions2 = []
for t_, row in item['actions'].iterrows():
actions2.append(row)
if (row['action_name'] == 'ToggleObjectOn') and (row['object_name'].split('|')[0] == 'StoveKnob'):
# Find a stoveburner
stoveburner_id = [k for k, v in df_mapping.items() if
k.startswith('StoveBurner') and v.shape[0] > (t_ + 1) and (
v.iloc[t_ + 1:]['ObjectTemperature'] == 2).any()]
if len(stoveburner_id) == 0:
import ipdb
ipdb.set_trace()
stoveburner_id = stoveburner_id[0]
pan_id = []
for t__, v__ in item['object_id_to_states'][stoveburner_id].items():
if int(t__) > t_:
for r_o_id in v__['receptacleObjectIds']:
if r_o_id in df_mapping and df_mapping[r_o_id].iloc[0]['receptacle']:
pan_id.append(r_o_id)
#
#
# pan_id = [k for k in item['object_id_to_states'][stoveburner_id][str(t_+1)]['receptacleObjectIds']
# if item['object_id_to_states'][stoveburner_id][str(t_+1)]['receptacle']]
if len(pan_id) == 0:
import ipdb
ipdb.set_trace()
pan_id = pan_id[0]
# OK now we can add the dummy action
actions2.append(pd.Series({
'action_id': THOR_ACTION_TYPE_TO_IND['HeatUpPan'],
'object_id': object_id_to_main_ind[pan_id],
'receptacle_object_id': -1,
'succeeds': True,
'err_msg': '',
'action_name': 'HeatUpPan',
'object_name': pan_id,
'receptacle_name': None,
}, name=t_))
# NOTE THAT THIS RENDERS object_id_to_states OBSOLETE
actions2 = pd.DataFrame(actions2)
idx = actions2.index.values
# Create an alternate index where instead of like
# array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
# 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 29, 30])
# see that 28?
# we have
# array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
# 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30])
# Since at 28 we have "ToggleObjectOn" and then the new action "HeatUpPan"
idx2 = np.copy(idx)
for i in np.where(idx[1:] == idx[:-1])[0]:
idx2[i + 1] = idx[i + 1] + 1
item['agent_states'] = item['agent_states'][idx]
bboxes_df_grouped = item['bboxes_df'].groupby('frame')
item['bboxes_df'] = pd.concat(
[bboxes_df_grouped.get_group(t) for t in idx if t in bboxes_df_grouped.groups], 0).reset_index(
drop=True)
if ('frames' in item) and (len(idx) != item['num_frames']):
item['frames'] = item['frames'][idx]
item['num_frames'] = len(idx)
item['actions'] = actions2.reset_index(drop=True)
for k in sorted(df_mapping.keys()):
if k.startswith(('StoveBurner', 'StoveKnob')):
df_mapping[k] = df_mapping[k].iloc[idx2].reset_index(drop=True)
else:
df_mapping[k] = df_mapping[k].iloc[idx].reset_index(drop=True)
# This is stupid but whatever
for t, row in item['actions'].iterrows():
if (row['action_name'] == 'ToggleObjectOn') and row['object_name'].startswith('StoveKnob'):
for k in sorted(df_mapping.keys()):
# Heat up all stove burners
if k.startswith('StoveBurner'):
df_mapping[k].loc[t + 1:, 'ObjectTemperature'] = 2
if row['action_name'] == 'HeatUpPan':
for k in sorted(df_mapping.keys()):
if (df_mapping[k].loc[t:(t + 5), 'ObjectTemperature'] == 2).any():
# print("FIXING heatuppan{}".format(k), flush=True)
df_mapping[k].loc[t + 1:, 'ObjectTemperature'] = 2
if df_mapping[k].iloc[0]['cookable']:
df_mapping[k].loc[t + 1:, 'isCooked'] = True
# Sinks should fill with water
#####
for k, v in df_mapping.items():
if k.startswith('Sink'):
v['canFillWithLiquid'] = True
filled_now = False
filled_list = [False]
for _, row in item['actions'].iterrows():
if row['object_name'] is not None and row['object_name'].startswith('Faucet'):
if row['action_name'] == 'ToggleObjectOn':
filled_now = True
elif row['action_name'] == 'ToggleObjectOff':
filled_now = False
filled_list.append(filled_now)
v['isFilledWithLiquid'] = filled_list[:-1]
if k.startswith('Faucet'):
# Weird stuff with size!
v['sizeraw'] = v.iloc[0]['sizeraw']
v['size'] = v.iloc[0]['size']
# If there's a pan fail then skip
if (item['actions']['action_name'] == 'HeatUpPan').any():
becomes_cooked = False
for v in df_mapping.values():
if v['cookable'].any() and len(v['isCooked'].value_counts()) == 2:
becomes_cooked = True
if not becomes_cooked:
# print("SKIPPING BECAUSE NOTHING BECAME COOKED", flush=True)
continue
item['df_mapping'] = df_mapping
item['main_object_ids'] = main_object_ids
item['object_id_to_main_ind'] = object_id_to_main_ind
########### Separate into "Temporal" and "non-temporal"
keys = []
df_rows = []
for k, v in item['object_id_to_states'].items():
if len(v) > 0:
keys.append(k)
all_vals = list(v.values())
df_rows.append(random.choice(all_vals))
# if '0' in v:
# keys.append(k)
# df_rows.append(v['0'])
nontemporal_mapping = _fixup_df(pd.DataFrame(df_rows))
import functools
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.transforms as transforms
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.optimize as opt
from .stats import poisson_interval
__all__ = [
"cms_label", "legend_data_mc", "data_mc", "data", "mc", "heatmap",
"annotate_heatmap",
"process_names", "process_colours",
"impacts", "nllscan",
]
def cms_label(ax, label, lumi=35.9, energy=13):
ax.text(
0, 1, r'$\mathbf{CMS}\ \mathit{'+label+'}$',
ha='left', va='bottom', transform=ax.transAxes,
)
ax.text(
1, 1, r'${:.1f}\ \mathrm{{fb}}^{{-1}}$ ({:.0f} TeV)'.format(lumi, energy),
ha='right', va='bottom', transform=ax.transAxes,
)
def legend_data_mc(
ax, df_data, df_mc, label, add_ratios=True, offaxis=True, legend_kw={},
):
handles, labels = ax[0].get_legend_handles_labels()
if add_ratios:
# sort by process total
tdf_mc = pd.pivot_table(
df_mc, index=label, columns="parent",
values="sum_w", aggfunc=np.sum,
)
tdf_mc = tdf_mc[tdf_mc.sum(axis=0).sort_values().index]
data_idx = labels.index("Data")
data_label = labels.pop(data_idx)
labels = (labels+[data_label])[::-1]
data_handle = handles.pop(data_idx)
handles = (handles+[data_handle])[::-1]
df_data_sum = df_data.sum()
tdf_mc_sum = tdf_mc.sum()
fractions = [
df_data_sum["sum_w"]/tdf_mc_sum.sum(), 1.,
] + list((tdf_mc_sum / tdf_mc_sum.sum()).values[::-1])
fraction_labels = [
"{:.3f} {}".format(fractions[idx], labels[idx])
for idx in range(len(labels))
]
else:
handles = handles[::-1]
fraction_labels = labels[::-1]
kwargs = dict(legend_kw)
kwargs_noloc = dict(kwargs)
kwargs_noloc.pop("loc", None)
if offaxis:
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width*0.8, box.height])
ax[0].legend(
handles, fraction_labels, bbox_to_anchor=(1, 1), **kwargs_noloc
)
box = ax[1].get_position()
ax[1].set_position([box.x0, box.y0, box.width*0.8, box.height])
else:
ax[0].legend(handles, fraction_labels, **kwargs)
handles, labels = ax[1].get_legend_handles_labels()
if offaxis:
ax[1].legend(handles, labels, bbox_to_anchor=(1, 1), **kwargs_noloc)
else:
ax[1].legend(handles, labels, **kwargs)
def bin_lows_to_edges_cents(lows):
edges = np.array(list(lows)+[2*lows[-1]-lows[-2]])
cents = (edges[:-1] + edges[1:])/2.
return edges, cents
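# Illustrative note (not from the original source): for bin lows [0., 1., 2.] the final
# edge is extrapolated as 2*2. - 1. = 3., so the helper returns edges [0., 1., 2., 3.]
# and bin centres [0.5, 1.5, 2.5].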
def data(ax, df, label, bins, data_kw={}):
bin_edges, bin_cents = bin_lows_to_edges_cents(bins)
# draw
kwargs = dict(fmt='o', lw=1, color='black', label='Data')
kwargs.update(data_kw)
mask = (df["sum_ww"]==0.)
neff = df["sum_w"]**2 / df["sum_ww"]
neff[mask] = 0.
scale = df["sum_w"]/neff
scale[mask] = 1.
down, up = poisson_interval(neff, scale=scale)
ax.errorbar(
bin_cents, df["sum_w"], yerr=[df["sum_w"]-down, up-df["sum_w"]],
**kwargs,
)
def poisson_interval_with_checks(x, variance):
down, up = poisson_interval(x**2/variance, scale=variance/x)
mask = (variance==0.)
down[mask] = 0.
up[mask] = np.inf
return down, up
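# Explanatory sketch (inferred from the call sites above, not part of the original module):
# for weighted histograms x = sum_w and variance = sum_ww, so the effective number of
# entries is n_eff = sum_w**2 / sum_ww with an average weight of sum_ww / sum_w per entry;
# poisson_interval(n_eff, scale=sum_ww/sum_w) then approximates the interval on sum_w,
# and bins with zero variance are forced to [0, inf).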
def mc(
ax, df, label, bins, mcstat=False, mc_kw={}, mcstat_kw={}, proc_kw={},
zorder=0, interval_func=poisson_interval_with_checks
):
stacked = mc_kw.pop("stacked") if "stacked" in mc_kw else False
bin_edges, bin_cents = bin_lows_to_edges_cents(bins)
# preprocess mc
tdf = pd.pivot_table(
df, index=label, columns="parent",
values="sum_w", aggfunc=np.sum,
)
# sort by process total
tdf_procsum = tdf.sum(axis=0)
tdf = tdf[tdf_procsum.sort_values().index]
# mc
procs = tdf.columns.to_series()
cumsum = tdf.iloc[:,0].copy(deep=True)
cumsum.values[:] = 0.
for idx, proc in enumerate(tdf.columns):
if stacked:
prev_cumsum = cumsum.copy(deep=True)
cumsum += tdf[proc]
else:
cumsum = tdf[proc]
color = proc_kw.get("colours", {}).get(proc, "blue")
kwargs = {
"color": color, "ec": color,
"label": proc_kw.get("labels", {}).get(proc, proc),
}
kwargs.update(mc_kw)
kwargs["zorder"] = -idx
ax.hist(bin_cents, bins=bin_edges, weights=cumsum, **kwargs)
if mcstat:
tdf_ww_up = pd.pivot_table(
df, index=label, columns="parent",
values="sum_ww_up", aggfunc=np.sum,
)
_, up = interval_func(tdf.values[:,0], tdf_ww_up.values[:,0])
tdf_ww_down = pd.pivot_table(
df, index=label, columns="parent",
values="sum_ww_down", aggfunc=np.sum,
)
down, _ = interval_func(tdf.values[:,0], tdf_ww_down.values[:,0])
kwargs = dict(color='black', alpha=0.2)
kwargs.update(mcstat_kw)
ax.fill_between(
bin_edges, list(up)+[list(up)[-1]],
list(down)+[list(down)[-1]],
step='post', **kwargs
)
def data_mc(
ax, df_data, df_mc, label, bins,
sigs=[], blind=False, log=True, legend=True, ratio=True, sm_total=True,
mcstat_top=False, mcstat=True, add_ratios=True, show_zeros=False,
mc_kw={}, sig_kw={}, mcstat_kw={}, sm_kw={}, data_kw={}, proc_kw={},
legend_kw={}, cms_kw={}, interval_func=poisson_interval_with_checks,
):
_df_data = df_data.copy(deep=True)
_df_mc = df_mc.copy(deep=True)
if not show_zeros:
_df_data.loc[_df_data["sum_w"]==0.,"sum_w"] = np.nan
# only mc sum_ww can be asymmetric
if "sum_ww_up" not in _df_mc:
_df_mc["sum_ww_up"] = _df_mc["sum_ww"]
if "sum_ww_down" not in _df_mc:
_df_mc["sum_ww_down"] = _df_mc["sum_ww"]
# collect signals if set
sigs = sigs[::-1]
sig_mask = ~_df_mc.index.get_level_values("parent").isin(sigs)
df_sig = _df_mc.loc[~sig_mask].copy(deep=True)
df_mc_sm = _df_mc.loc[sig_mask].copy(deep=True)
# preprocessing
df_mc_sum = df_mc_sm.groupby(label).sum()
df_mc_sum.loc[:,"parent"] = "SMTotal"
df_mc_sum = df_mc_sum.groupby(["parent", label]).sum()
# draw
if log:
ax[0].set_yscale('log')
bin_edges, _ = bin_lows_to_edges_cents(bins)
ax[0].set_xlim(bin_edges.min(), bin_edges.max())
# signals - top panel
sig_kw_ = dict(histtype='step', zorder=1)
sig_kw_.update(sig_kw)
if len(sigs) > 0:
mc(
ax[0], df_sig, label, bins, mcstat=False, mc_kw=sig_kw_,
proc_kw=proc_kw, interval_func=interval_func,
)
# MC - top panel
mc_kw_ = dict(stacked=True)
mc_kw_.update(mc_kw)
mc(
ax[0], df_mc_sm, label, bins, mcstat=False,
mc_kw=mc_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# SM total - top panel
if sm_total:
mc_kw_ = dict(histtype='step')
mc_kw_.update(sm_kw)
mcstat_kw_ = dict(label="", color="black", alpha=0.2)
mcstat_kw_.update(mcstat_kw)
mc(
ax[0], df_mc_sum, label, bins, mcstat=mcstat_top, mc_kw=mc_kw_,
mcstat_kw=mcstat_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# Data - top panel
if not blind:
data(ax[0], _df_data, label, bins, data_kw=data_kw)
# CMS label - top panel
kwargs = dict(label="Preliminary", lumi=35.9, energy=13)
kwargs.update(cms_kw)
#cms_label(ax[0], **kwargs)
# SM total ratio - bottom panel
df_mc_sum_ratio = df_mc_sum.copy()
df_mc_sum_ratio.loc[:,"sum_w"] = 1.
df_mc_sum_ratio.loc[:,"sum_ww_up"] = (
df_mc_sum["sum_ww_up"]/df_mc_sum["sum_w"]**2
)
df_mc_sum_ratio.loc[:,"sum_ww_down"] = (
df_mc_sum["sum_ww_down"]/df_mc_sum["sum_w"]**2
)
if ratio:
mc_kw_ = dict(label="", histtype='step')
mc_kw_.update(sm_kw)
mcstat_kw_ = dict(label="MC stat. unc.", color="black", alpha=0.2)
mcstat_kw_.update(mcstat_kw)
mc(
ax[1], df_mc_sum_ratio, label, bins, mcstat=mcstat, mc_kw=mc_kw_,
mcstat_kw=mcstat_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# Data ratio - bottom panel
if not blind:
kwargs = dict(data_kw)
kwargs["label"] = ""
df_data_ratio = _df_data.copy()
df_data_ratio.loc[:,"sum_w"] = _df_data["sum_w"]/df_mc_sum["sum_w"].values
df_data_ratio.loc[:,"sum_ww"] = _df_data["sum_ww"]/df_mc_sum["sum_w"].values**2
data(ax[1], df_data_ratio, label, bins, data_kw=kwargs)
if legend:
offaxis = legend_kw.pop("offaxis", True)
kwargs = dict(labelspacing=0.05)
kwargs.update(legend_kw)
legend_data_mc(
ax, _df_data, _df_mc, label, add_ratios=add_ratios,
offaxis=offaxis, legend_kw=kwargs,
)
return ax
def heatmap(
data, row_labels, col_labels, ax, cbar_kw=dict(fraction=0.046, pad=0.04),
cbarlabel="", grid_kw={}, tick_kw={}, **kwargs,
):
if not ax:
ax = plt.gca()
im = ax.imshow(data, **kwargs)
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
ax.tick_params(**tick_kw)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(), ha="right", #rotation=-30,
rotation_mode="anchor",
)
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
gkw = dict(which="minor", color="w", linestyle='-', linewidth=2)
gkw.update(grid_kw)
ax.grid(**gkw)
ax.tick_params(
which="minor", bottom=False, left=False, top=False, right=False,
)
ax.tick_params(
which="major", bottom=False, left=False, top=False, right=False,
)
return im, cbar
def annotate_heatmap(
im, data=None, valfmt="{x:.2f}", textcolors=["black", "white"],
cthreshold=lambda z: True, vthreshold=lambda z: True, **textkw,
):
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
kw = dict(ha="center", va="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = mpl.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[int(cthreshold(data[i, j]))])
if not vthreshold(data[i, j]):
continue
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
process_colours = {
"SMTotal": "black",
"MET": "black",
"SingleMuon": "black",
"SingleElectron": "black",
"ZJetsToNuNu": "#80b1d3",
"WJetsToLNu": "#b3de69",
"WJetsToENu": "#b2df8a",
"WJetsToMuNu": "#b3de69",
"WJetsToTauNu": "#8dd3c7",
"WJetsToTauLNu": "#8dd3c7",
"WJetsToTauHNu": "#8dd3c7",
"Diboson": "#fdb462",
"DYJetsToLL": "#ffed6f",
"DYJetsToEE": "#fff6b3",
"DYJetsToMuMu": "#ffed6f",
"DYJetsToTauTau": "#ffe41a",
"DYJetsToTauLTauL": "#ffe41a",
"DYJetsToTauHTauL": "#ffe41a",
"DYJetsToTauHTauH": "#ffe41a",
"EWKV2Jets": "#bebada",
"SingleTop": "#fccde5",
"TTJets": "#bc80bd",
"Top": "#bc80bd",
"QCD": "#fb8072",
"G1Jet": "#ccebc5",
"VGamma": "#ffffb3",
"Minor": "#d9d9d9",
"MinorBkgs": "#d9d9d9",
}
process_names = {
"SMTotal": "SM total",
"MET": "MET",
"SingleMuon": "Single Muon",
"SingleElectron": "Single Electron",
"ZJetsToNuNu": "$Z(\\rightarrow \\nu\\nu)+j$",
"WJetsToLNu": "$W(\\rightarrow l\\nu)+j$",
"WJetsToENu": "$W(\\rightarrow e\\nu)+j$",
"WJetsToMuNu": "$W(\\rightarrow \\mu\\nu)+j$",
"WJetsToTauNu": "$W(\\rightarrow \\tau\\nu)+j$",
"WJetsToTauLNu": "$W(\\rightarrow \\tau_{l}\\nu)+j$",
"WJetsToTauHNu": "$W(\\rightarrow \\tau_{h}\\nu)+j$",
"Diboson": "Diboson",
"DYJetsToLL": "$Z/\\gamma^{*}(\\rightarrow ll)+j$",
"DYJetsToEE": "$Z/\\gamma^{*}(\\rightarrow ee)+j$",
"DYJetsToMuMu": "$Z/\\gamma^{*}(\\rightarrow \\mu\\mu)+j$",
"DYJetsToTauTau": "$Z/\\gamma^{*}(\\rightarrow \\tau\\tau)+j$",
"DYJetsToTauLTauL": "$Z/\\gamma^{*}(\\rightarrow \\tau_{l}\\tau_{l})+j$",
"DYJetsToTauHTauL": "$Z/\\gamma^{*}(\\rightarrow \\tau_{l}\\tau_{h})+j$",
"DYJetsToTauHTauH": "$Z/\\gamma^{*}(\\rightarrow \\tau_{h}\\tau_{h})+j$",
"EWKV2Jets": "VBS",
"SingleTop": "Single Top",
"TTJets": "$t\\bar{t}+j$",
"QCD": "QCD multijet",
"G1Jet": "$\\gamma+j$",
"VGamma": "$V+\\gamma$",
"Minor": "Minor",
"MinorBkgs": "Minor",
}
nuisance_names = {
"d1kqcd": r'$\delta^{(1)}k_{\mathrm{QCD}}$',
"d2kqcd": r'$\delta^{(2)}k_{\mathrm{QCD}}$',
"d3kqcd": r'$\delta^{(3)}k_{\mathrm{QCD}}$',
"d1kew": r'$\delta^{(1)}k_{\mathrm{EW}}$',
"d2keww": r'$\delta^{(2)}k_{\mathrm{EW}}^{\mathrm{W}}$',
"d2kewz": r'$\delta^{(2)}k_{\mathrm{EW}}^{\mathrm{Z}}$',
"d3keww": r'$\delta^{(3)}k_{\mathrm{EW}}^{\mathrm{W}}$',
"d3kewz": r'$\delta^{(3)}k_{\mathrm{EW}}^{\mathrm{Z}}$',
"dkmix": r'$\delta k_{\mathrm{mix}}$',
"jesTotal": r'JES',
"jerSF": r'JER',
"unclust": r'Unclustered energy',
"lhePdfWeight": r'PDF',
"btagSF": r'$b$-tag veto',
"photonIdLoose": r'Photon id. veto',
"photonPixelSeedVeto": r'Photon pixel veto',
"tauIdTight": r'$\tau_h$-tag id. selection',
"tauIdVLoose": r'$\tau_h$-tag id. veto',
"muonIdLooseSyst": r'Muon id. veto (syst.)',
"muonIdLooseStat": r'Muon id. veto (stat.)',
"muonIsoLooseSyst": r'Muon iso. veto (syst.)',
"muonIsoLooseStat": r'Muon iso. veto (stat.)',
"muonIdTightSyst": r'Muon id. selection (syst.)',
"muonIdTightStat": r'Muon id. selection (stat.)',
"muonIsoTightSyst": r'Muon iso. selection (syst.)',
"muonIsoTightStat": r'Muon iso. selection (stat.)',
"eleIdIsoVeto": r'Electron id. veto',
"eleIdIsoTight": r'Electron id. selection',
"eleReco": r'Electron reconstruction',
"eleTrig": r'Electron trigger',
"prefiring": r'ECAL timing',
"pileup": r'Pileup',
"lumi": r'Luminosity',
"metTrig0MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($0\mu$)',
"metTrig1MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($1\mu$)',
"metTrig2MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($2\mu$)',
"metTrigReferenceTriggerSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger (ref.)',
"metTrigMonojetSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\p_{\mathrm{T}}^{\mathrm{miss}}+\mathrm{jets}$)',
"metTrigSingleMuonSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\mu+\mathrm{jets}$)',
"metTrigDoubleMuonSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\mu\mu+\mathrm{jets}$)',
"metTrigSingleTauSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\tau_h+\mathrm{jets}$)',
"metTrigSingleElectronSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($e+\mathrm{jets}$)',
"metTrigDoubleElectronSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($ee+\mathrm{jets}$)',
}
def impacts(data, ax=None, converter=nuisance_names):
if ax is None:
fig, ax = plt.subplots(
figsize=(4,4), dpi=150,
ncols=2, nrows=1,
sharex=False, sharey=True,
gridspec_kw={"hspace": 0., "wspace": 0.},
)
ax[0].minorticks_off()
ax[1].minorticks_off()
ax[1].set_yticklabels([])
y = data["poi_paramdown"].values
x = np.linspace(0., len(y), len(y)+1)
ax[1].hist(
x[:-1], bins=x, weights=y,
color='#1f78b4', alpha=0.8,
orientation='horizontal',
label=r'$-1\sigma$',
)
y = data["poi_paramup"].values
ax[1].hist(
x[:-1], bins=x, weights=y,
color='#e31a1c', alpha=0.8,
orientation='horizontal',
label=r'$+1\sigma$',
)
xmax = np.max(np.abs(ax[1].get_xlim()))
ax[1].set_xlim(-1.1*xmax, 1.1*xmax)
ax[1].set_ylim(0, len(y))
ax[1].axvline(0, lw=1, color='gray', alpha=0.8)
y = data["param_value"].values
yerr = (
-1*data["param_merrdown"].values,
data["param_merrup"].values,
)
ax[0].errorbar(
y, (x[:-1]+x[1:])/2., xerr=yerr,
fmt='o', color='black',
ms=4, capsize=4,
)
xmax = data.eval("param_value+param_merrup").max()
xmax = max(xmax, data.eval("-(param_value+param_merrdown)").max())
xmax = int(xmax)+1
ax[0].set_xlim(-xmax, xmax)
for pos in range(xmax):
ax[0].axvline(pos, lw=1, color='gray', alpha=0.8)
ax[0].axvline(-pos, lw=1, color='gray', alpha=0.8)
ax[0].set_ylim(0, len(y))
ax[0].set_xticks(np.arange(-(xmax-1), (xmax-1)+0.1, 1.))
ax[0].set_yticks((x[:-1]+x[1:])/2.)
labels = [
converter.get(l, l.replace("_", " "))
for l in data.index.get_level_values("param").values
]
ax[0].set_yticklabels(labels)
ax[0].set_xlabel(r'$\theta$')
ax[1].set_xlabel(r'$\Delta\hat{r}$')
ax[1].legend(fancybox=True, edgecolor='#d9d9d9')
return fig, ax
def nllscan(
x, y, ax=None, marker_kw={}, spline_kw={}, splrep_kw={}, splev_kw={},
opt_kw={}, root_kw={}, line_kw={}, text_kw={}, nsigs=[1],
bestfit_guess=[0.], left_bracket=(-np.inf, 0), right_bracket=(0, np.inf),
):
"""
Helper function to plot a -2*Delta(log(L)) scan from two array-like inputs:
the x variable and the y variable (which should hold the
-2*Delta(log(L)) values).
Parameters
----------
x : np.ndarray-like
The input x variable.
y : np.ndarray-like
The input y variable. Should hold values of -2*Delta(log(L))
ax : matplotlib.axes, optional (default=None)
The axis to draw on.
marker_kw : dict-like, optional (default={})
kwargs to pass to ax.plot. Updates a dict with:
dict(marker='o', ms=2, lw=0., label='Scan', color='#1f78bf')
spline_kw : dict-like, optional (default={})
kwargs to pass to ax.plot. Updates a dict with:
dict(lw=1., label='Spline', color='#e31a1c')
splrep_kw: dict-like, optional (default={})
kwargs to pass to scipy.interpolate.splrep. Updates a dict with:
dict(s=0)
splev_kw: dict-like, optional (default={})
kwargs to pass to scipy.interpolate.splev. Updates a dict with:
dict(der=0)
opt_kw: dict-like, optional (default={})
kwargs to pass to scipy.optimize.minimize. Updates a dict with:
dict(method='L-BFGS-B')
root_kw: dict-like, optional (default={})
kwargs to pass to scipy.optimize.root_scalar. Updates a dict with:
dict(method='brentq')
line_kw: dict-like, optional (default={})
kwargs to pass to axes.ax?line. Updates a dict with:
dict(lw=1, ls='--', color='gray')
text_kw: dict-like, optional (default={})
kwargs to pass to axes.text. Updates a dict with:
dict(ha='left', va='bottom', color='gray')
nsigs : list of floats, optional (default=[1])
List of number of sigmas to draw on the final plot
bestfit_guess : list of floats, optional (default=[0.])
Best fit guess of the minimum for scipy.optimize
left_bracket : tuple of floats, optional (default=(-np.inf, 0))
Guess for left root bracket.
right_bracket : tuple of floats, optional (default=(0, np.inf))
Guess for right root bracket.
Return
------
pd.DataFrame with columns: nsig and xval
"""
outdata = []
if ax is None:
fig, ax = plt.subplots()
kw = dict(marker='o', ms=2, lw=0., label='Scan', color='#1f78bf')
kw.update(marker_kw)
ax.plot(x, y, **kw)
# spline
kw = dict(s=0)
kw.update(splrep_kw)
tck = interp.splrep(x, y, **kw)
kw = dict(der=0)
kw.update(splev_kw)
kw["tck"] = tck
func = functools.partial(interp.splev, **kw)
xfine = np.linspace(x.min(), x.max(), 201)
kw = dict(lw=1., label='Spline', color='#e31a1c')
kw.update(spline_kw)
ax.plot(xfine, func(xfine), **kw)
kw = dict(method='L-BFGS-B')
kw.update(opt_kw)
bestfit = opt.minimize(func, bestfit_guess, **kw)
outdata.append({"nsig": 0., "xval": bestfit.x[0]})
for nsig in nsigs:
kw = dict(method='brentq')
kw.update(root_kw)
kw["bracket"] = left_bracket
left = opt.root_scalar(lambda x: func(x)-nsig**2, **kw)
outdata.append({"nsig": nsig, "xval": left.root})
kw = dict(method='brentq')
kw.update(root_kw)
kw["bracket"] = right_bracket
right = opt.root_scalar(lambda x: func(x)-nsig**2, **kw)
outdata.append({"nsig": -nsig, "xval": right.root})
kw = dict(lw=1, ls='--', color='gray')
kw.update(line_kw)
ax.axvline(left.root, **kw)
ax.axvline(right.root, **kw)
ax.axhline(nsig**2, **kw)
pos = ax.transData.inverted().transform(
ax.transAxes.transform((0.025, 1))
)
kw = dict(ha='left', va='bottom', color='gray')
kw.update(text_kw)
ax.text(pos[0], nsig**2, f'${nsig}\\sigma$', **kw)
return pd.DataFrame(outdata)
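# Hypothetical usage sketch (the file name and column names below are assumptions, not
# part of this module):
#
# >>> scan = pd.read_csv("scan.csv")            # columns: "r" and "deltaNLL"
# >>> fig, ax = plt.subplots()
# >>> crossings = nllscan(scan["r"], 2.*scan["deltaNLL"], ax=ax, nsigs=[1, 2])
# >>> # crossings has one row per point: nsig == 0 is the best fit, the +/-1 and +/-2
# >>> # rows are where the spline crosses nsig**2.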
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Combine and normalize tweet.json files into a DataFrame dumped to csv
- Find json files (recursively) within the current path
- Load those that look like tweets dumped by tweetget
- Expand columns that contain arrays, e.g. geo.coordinates -> geo.coordinates.lat and .lon
- Combine each DataFrame into a single Pandas DataFrame
- Save utf-8 encoded csv file of the normalized/combined DataFrame
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from future.utils import viewitems # noqa
from builtins import str # noqa
from past.builtins import basestring # noqa
try:
from itertools import izip as zip
except ImportError:
pass
import os
import re
import json
import logging
import time
import gzip
import pandas as pd
import progressbar
from twip.futil import find_files
from twip.constant import DATA_PATH
import argparse
import sys
from twip import __version__
__author__ = "hobs"
__copyright__ = "hobs"
__license__ = "mit"
np = pd.np
log = logging.getLogger(__name__)
LOG_FORMAT = '%(levelname)-5s %(module)s.%(funcName)s:%(lineno)d %(message)s'
def parse_args(args):
"""
Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Concatenate and preprocess tweet json files dumped by tweetget")
parser.add_argument(
'-v',
'--verbose',
help='Whether to show progress messages on stdout',
action='store_true')
parser.add_argument(
'--version',
help='print twip package version and exit.',
action='version',
version='twip {ver}'.format(ver=__version__))
parser.add_argument(
'-p',
'--path',
help='path to data dir (json input and csv output files)',
default=DATA_PATH)
parser.add_argument(
'-t',
'--tweetfile',
help='file to store normalized table (csv) of tweets and features (metadata)',
default='all_tweets.csv')
parser.add_argument(
'-n',
'--numtweets',
help='maximum number of tweets to process',
type=int,
default=1000000)
parser.add_argument(
'-g',
'--geofile',
help="file to store tweet lat/lon information (few use it)",
default='geo_tweets.csv')
return parser.parse_args(args)
def main(args):
"""API with args object containing configuration parameters"""
global logging, log
args = parse_args(args)
logging.basicConfig(format=LOG_FORMAT,
level=logging.DEBUG if args.verbose else logging.INFO,
stream=sys.stdout)
df = cat_tweets(path=args.path, verbosity=args.verbose + 1, numtweets=args.numtweets, ignore_suspicious=False)
log.info('Combined {} tweets'.format(len(df)))
df = drop_nan_columns(df)
save_tweets(df, path=args.path, filename=args.tweetfile)
geo = get_geo(df, path=args.path, filename=args.geofile)
log.info("Combined {} tweets into a single file {} and set asside {} geo tweets in {}".format(
len(df), args.tweetfile, len(geo), args.geofile))
return df, geo
def run():
"""Entry point for command line script"""
main(sys.argv[1:])
def cat_tweets(filename='all_tweets.json.gz', path=DATA_PATH, ext='.json', save_tmp=False, verbosity=1, numtweets=10000000, ignore_suspicious=True):
"""Find json files that were dumped by tweetget and combine them into a single CSV
Normalize some (lat/lon)"""
log.info('Finding {} files in {}...'.format(ext, path))
meta_files = find_files(path=path, ext=ext)
meta_files = [meta for meta in meta_files
if re.match(r'^[-#@a-z ]*201[5-6]-[0-9]{2}-[0-9]{2}.*', meta['name'])]
# '\s[0-9]{2}[:][0-9]{2}[:][0-9]{2}[.][0-9]+[.]json(.gz)?$', meta['name'])]
log.info('Found {} files that look like tweetget dumps.'.format(len(meta_files)))
print([mf['name'] for mf in meta_files])
total_size = sum([meta['size'] for meta in meta_files])
if verbosity > 0:
pbar = progressbar.ProgressBar(maxval=(total_size + 1.) / 1e6)
pbar.start()
else:
pbar = None
loaded_size = 0
df_all = pd.DataFrame()
for meta in meta_files:
with (gzip.open(meta['path']) if ext.endswith('.gz') else open(meta['path'])) as fin:
js = pd.json.load(fin)
import pandas as pd
class TECRDB_compounds_data(object):
def __init__(self):
"""
A module that processes information of compounds in TECRDB
"""
self.TECRDB_compounds_data_dict = {}
self.TECRDB_compounds_pH7_species_id_dict = {}
self.TECRDB_compounds_least_H_sid_dict = {}
self.get_TECRDB_compounds_data()
def get_TECRDB_compounds_data(self):
"""
reads in data for compounds in TECRDB
:return: a dictionary with keys being different ion bound states of the compound (we call it species_id here, e.g CHB_15422_-1 refers to -1 charged form of
compound_id CHB_15422), values being a dictionary storing the thermodynamic information and molecular properties of the species_id
"""
TECRDB_compounds_data_table = pd.read_csv('data/TECRDB_compounds_data.csv')
# ClinVarome annotation functions
# Gather all gene annotations: gene, gene_id,
# (AF, FAF,) diseases, clinical features, mechanism counts, nhomalt.
# Score genes according to their confidence criteria.
# Commented code is the lines needed to run the AgglomerativeClustering
import pandas as pd
import numpy as np
import pysam
from scipy.stats import poisson
# from sklearn.preprocessing import QuantileTransformer
# from sklearn.cluster import AgglomerativeClustering
from clinvarome.utils.dictionary import (
EFFECTS,
MC_CATEGORIES,
MC_SHORT,
# ARRAY_TRANSFORM,
# CLUSTER_NAMES,
)
import logging
# For logs
def get_logger(scope: str, level=logging.DEBUG):
"""
get_logger
"""
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=level
)
return logging.getLogger(scope)
logger = get_logger(__name__)
# Clinical features
def gather_clinical_features(record, gene_finding, gene_disease):
"""
update gene_finding and gene_disease dictionary using information from a VCF record
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
if "CLNDISEASE" in record.info:
clndisease = record.info["CLNDISEASE"][0].split("|")
gene_disease.setdefault(geneinfo, [])
gene_disease[geneinfo].append(clndisease)
if "CLNFINDING" in record.info:
clnfinding = record.info["CLNFINDING"][0].split("|")
gene_finding.setdefault(geneinfo, [])
gene_finding[geneinfo].append(clnfinding)
def get_clinical_dataframe(gene_disease, gene_finding):
"""
Process dictionary output from gather_clinical_features function
into a dataframe
"""
for key, value in gene_disease.items():
flat_list = [j for i in value for j in i]
gene_disease[key] = ";".join(sorted(list(set(flat_list))))
gene_disease_df = pd.DataFrame(
gene_disease.items(), columns=["gene_info", "clinical_disease"]
)
for key, value in gene_finding.items():
flat_list = [j for i in value for j in i]
gene_finding[key] = ";".join(sorted(list(set(flat_list))))
gene_finding_df = pd.DataFrame(
gene_finding.items(), columns=["gene_info", "clinical_finding"]
)
gene_features = gene_disease_df.merge(gene_finding_df, how="outer")
return gene_features
# FAF
def calcul_max_AF(AC, AN):
"""
For a given AC and AN, calculate the maximum AF: the
upper bound of the Poisson 95 % CI.
"""
if (AC == 0) and (AN != 0):
max_AF_pois = 1 / AN
elif (AC != 0) and (AN != 0):
max_AC_pois = poisson.ppf(0.95, AC)
max_AF_pois = float(max_AC_pois / AN)
else:
max_AF_pois = 0
return max_AF_pois
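# Worked example (illustrative, not from the original source): with AC=5 and AN=10000,
# poisson.ppf(0.95, 5) = 9.0, so the returned upper-bound allele frequency is
# 9.0 / 10000 = 9e-4; with AC=0 the bound falls back to 1/AN.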
def gather_dict_gene_max_AF(record, gene_AF_pois_dict):
"""
Update the maximum FAF of a gene using information in a VCF record
"""
ls_AC = []
ls_AN = []
ls_AF_pois = []
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_AF_pois_dict.setdefault(geneinfo, [])
if "AC_afr" in record.info:
AC_afr = record.info["AC_afr"]
AC_amr = record.info["AC_amr"]
AC_nfe = record.info["AC_nfe"]
AC_eas = record.info["AC_eas"]
AN_afr = record.info["AN_afr"]
AN_amr = record.info["AN_amr"]
AN_nfe = record.info["AN_nfe"]
AN_eas = record.info["AN_eas"]
ls_AC = [AC_afr, AC_amr, AC_nfe, AC_eas]
ls_AN = [AN_afr, AN_amr, AN_nfe, AN_eas]
for k in range(0, len(ls_AC)):
ls_AF_pois.append(calcul_max_AF(ls_AC[k], ls_AN[k]))
max_af_pois = max(ls_AF_pois)
gene_AF_pois_dict[geneinfo].append(max_af_pois)
else:
gene_AF_pois_dict[geneinfo].append(0)
def get_AF_max_by_gene(gene_AF_dict):
"""For a given gene, return the maximum FAF (among its variants)
and get a dataframe."""
gene_AF_max = {}
for key, values in gene_AF_dict.items():
gene_max_AF = max(values)
gene_AF_max.setdefault(key, [])
gene_AF_max[key].append(gene_max_AF)
print(gene_AF_max)
gene_anno_pois = pd.DataFrame.from_dict(
gene_AF_max, orient="index", columns=["FAF"]
)
gene_anno_pois = gene_anno_pois.reset_index()
gene_anno_pois = gene_anno_pois.rename(columns={"index": "gene_info"})
print(gene_anno_pois)
return gene_anno_pois
# Molecular consequence counts
def mol_consequences_by_variant(record, gene_var_dict):
"""
Parse molecular consequences (mc) available for a variant and
return the highest predicted effect
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_var_dict.setdefault(geneinfo, [])
if "MC" in record.info:
mc = record.info["MC"]
mc_only = [i.split("|")[1] for i in mc]
min_value = min([v for k, v in EFFECTS.items() if k in mc_only])
for key, value in EFFECTS.items():
if min_value == value:
gene_var_dict[geneinfo].append(MC_CATEGORIES[key])
break
else:
gene_var_dict[geneinfo].append("Not_provided")
def count_type_mol_consequences(gene_var_dict):
"""
Count occurrences of molecular consequences (mc) from pathogenic
variants for each gene
"""
gene_mc_count = {}
for key, values in gene_var_dict.items():
list_mc = []
for k in MC_SHORT.keys():
if k in values:
count = values.count(k)
list_mc.append([count, k])
gene_mc_count.setdefault(key, [])
gene_mc_count[key].append(list_mc)
return gene_mc_count
def get_mol_consequences_dataframe(gene_var_dict):
"""
Format molecular consequences occurences (mc) by gene dictionary into dataframe.
"""
gene_mc_count = count_type_mol_consequences(gene_var_dict)
df_tot = pd.DataFrame()
for key, values in gene_mc_count.items():
for k in range(len(values[0])):
mecanism_dict = {}
mecanism_dict[key] = values[0][k]
df = pd.DataFrame.from_dict(
mecanism_dict, orient="index", columns=["count", "mecanism"]
)
df_tot = df_tot.append(df)
df_tot.index.name = "gene_info"
df_tot_piv = pd.pivot_table(
df_tot, values="count", index="gene_info", columns=["mecanism"], fill_value=0,
)
return df_tot_piv
# nhomalt annotation
def get_nhomalt(record, gene_nhomalt):
"""
Return the count of homozygous alleles in gnomAD for a pathogenic variant.
"""
if "nhomalt" in record.info:
nhomalt = record.info["nhomalt"][0]
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_nhomalt.setdefault(geneinfo, [])
gene_nhomalt[geneinfo].append(nhomalt)
return gene_nhomalt
def get_max_nhomalt_by_gene(gene_nhomalt):
"""
Get the maximum count of homozygous pathogenic alleles in gnomAD by gene.
Return a dataframe.
"""
gene_nhomalt_max = {}
for key, values in gene_nhomalt.items():
nhomalt_max = max(values)
gene_nhomalt_max.setdefault(key, [])
gene_nhomalt_max[key].append(nhomalt_max)
gene_nhomalt_max_df = pd.DataFrame.from_dict(
gene_nhomalt_max, orient="index", columns=["nhomalt"]
)
gene_nhomalt_max_df = gene_nhomalt_max_df.reset_index()
gene_nhomalt_max_df = gene_nhomalt_max_df.rename(columns={"index": "gene_info"})
return gene_nhomalt_max_df
# Gene date
def gene_first_pathogenic_entry_date(clinvarome_df, compare_gene):
"""
Return the first occurrence of a (likely) pathogenic variant for a gene in ClinVar.
"""
compare_gene_df = pd.read_csv(compare_gene, sep="\t", compression="gzip")
from unittest import TestCase
from category_encoders.utils import convert_input_vector, convert_inputs
import pandas as pd
import numpy as np
class TestUtils(TestCase):
def test_convert_input_vector(self):
index = [2, 3, 4]
result = convert_input_vector([0, 1, 0], index) # list
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector([[0, 1, 0]], index) # list of lists (row)
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector([[0], [1], [0]], index) # list of lists (column)
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector(np.array([1, 0, 1]), index) # np vector
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector(np.array([[1, 0, 1]]), index) # np matrix row
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector(np.array([[1], [0], [1]]), index) # np matrix column
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector(pd.Series([0, 1, 0], index=[4, 5, 6]), index) # series
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [4, 5, 6], 'We want to preserve the original index')
result = convert_input_vector(pd.DataFrame({'y': [0, 1, 0]}, index=[4, 5, 6]), index) # dataFrame
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [4, 5, 6], 'We want to preserve the original index')
result = convert_input_vector((0, 1, 0), index) # tuple
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(3, len(result))
np.testing.assert_array_equal(result.index, [2, 3, 4])
result = convert_input_vector(0, [2]) # scalar
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(1, len(result))
self.assertTrue(result.index == [2])
result = convert_input_vector('a', [2]) # scalar
self.assertTrue(isinstance(result, pd.Series))
self.assertEqual(1, len(result))
self.assertTrue(result.index == [2])
# multiple columns and rows should cause an error because it is unclear which column/row to use as the target
self.assertRaises(ValueError, convert_input_vector, (pd.DataFrame({'col1': [0, 1, 0], 'col2': [1, 0, 1]})), index)
self.assertRaises(ValueError, convert_input_vector, (np.array([[0, 1], [1, 0], [0, 1]])), index)
self.assertRaises(ValueError, convert_input_vector, ([[0, 1], [1, 0], [0, 1]]), index)
# edge scenarios (it is ok to raise an exception but please, provide then a helpful exception text)
_ = convert_input_vector(pd.Series(dtype=float), [])
_ = convert_input_vector([], [])
_ = convert_input_vector([[]], [])
_ = convert_input_vector(pd.DataFrame(), [])
def test_convert_inputs(self):
aindex = [2, 4, 5]
bindex = [1, 3, 4]
alist = [5, 3, 6]
aseries = pd.Series(alist, aindex)
"""
Technical Analysis Library
Library of functions to compute various technical indicators.
@author: eyu
"""
import logging
import numpy as np
import pandas as pd
import math as math
import statistics as stats
import datetime
import constants as c
# create logger
logger = logging.getLogger("algo-trader")
def copy_column(df, column_source, column_target):
"""
Copy an existing column to a new column in dataframe.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to copy
:param column_target: name of target column in dataframe for copied values
:return: modified dataframe
"""
df[column_target] = df[column_source]
return df
def copy_column_shift(df, column_source, column_target, shift_amount):
"""
Copy an existing column (shifted by shift_amount) to a new column in dataframe.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to copy
:param column_target: name of target column in dataframe for copied values
:param shift_amount: amount of rows to shift
:return: modified dataframe
"""
df[column_target] = df[column_source].shift(shift_amount)
return df
def compute_sma_custom(df, column_source, column_target_sma, time_period):
"""
Compute Simple Moving Average (SMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute SMA (e.g. close price)
:param column_target_sma: prefix of target column in dataframe for SMA results
:param time_period: time period (number of days for SMA)
:return: modified dataframe
"""
# compute SMA
history_values = []
sma_values = []
for value in df[column_source]:
history_values.append(value)
if len(history_values) > time_period:
del (history_values[0])
sma_values.append(stats.mean(history_values))
# add computed SMA results back to dataframe
key_sma = column_target_sma + "-{:d}".format(time_period)
df[key_sma] = sma_values
return df
def compute_sma(df, column_source, column_target_sma, time_periods):
"""
Compute Simple Moving Average (SMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute SMA (e.g. close price)
:param column_target_sma: prefix of target column in dataframe for SMA results
:param time_periods: list of time periods (number of days for SMA)
:return: modified dataframe
"""
# compute SMA for each time period and add results back to dataframe
for time_period in time_periods:
key_sma = column_target_sma + "-{:d}".format(time_period)
df[key_sma] = df[column_source].rolling(window=time_period, min_periods=1).mean()
return df
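# Illustrative usage (column names are assumptions, not fixed by this module):
#
# >>> df = pd.DataFrame({"close": [10., 11., 12., 13., 14.]})
# >>> df = compute_sma(df, "close", "sma", [3])
# >>> df["sma-3"].tolist()
# [10.0, 10.5, 11.0, 12.0, 13.0]
# With min_periods=1 the first rows average over however many observations exist so far.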
def compute_ema_custom(
df, column_source, column_target_ema, column_target_golden_cross, column_target_death_cross,
time_period_fast, time_period_slow):
"""
Compute Exponential Moving Average (EMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute EMA (e.g. close price)
:param column_target_ema: prefix of target column in dataframe for EMA results
:param column_target_golden_cross: name of target column in dataframe for golden cross results
:param column_target_death_cross: name of target column in dataframe for death cross results
:param time_period_fast: number of days over which to average for fast EMA
:param time_period_slow: number of days over which to average for slow EMA
:return: modified dataframe
"""
# compute EMA
k_fast = 2 / (time_period_fast + 1) # fast EMA smoothing factor
ema_fast = 0
k_slow = 2 / (time_period_slow + 1) # slow EMA smoothing factor
ema_slow = 0
ema_fast_values = []
ema_slow_values = []
for value in df[column_source]:
if ema_fast == 0: # first observation
ema_fast = value
ema_slow = value
else:
ema_fast = (value - ema_fast) * k_fast + ema_fast
ema_slow = (value - ema_slow) * k_slow + ema_slow
ema_fast_values.append(ema_fast)
ema_slow_values.append(ema_slow)
# add computed EMA results back to dataframe
key_ema_fast = column_target_ema + "-{:d}".format(time_period_fast)
key_ema_slow = column_target_ema + "-{:d}".format(time_period_slow)
df[key_ema_fast] = ema_fast_values
df[key_ema_slow] = ema_slow_values
# compute golden cross / death cross
previous_fast_series = df[key_ema_fast].shift(1)
previous_slow_series = df[key_ema_slow].shift(1)
golden_cross_values = []
death_cross_values = []
for i in (range(0, len(df.index))):
golden_cross_values.append(
(ema_fast_values[i] >= ema_slow_values[i]) & (previous_fast_series[i] <= previous_slow_series[i]))
death_cross_values.append(
(ema_fast_values[i] <= ema_slow_values[i]) & (previous_fast_series[i] >= previous_slow_series[i]))
# add computed crossing results back to dataframe
key_golden_cross = column_target_golden_cross + "-{:d}-{:d}".format(time_period_fast, time_period_slow)
key_death_cross = column_target_death_cross + "-{:d}-{:d}".format(time_period_fast, time_period_slow)
df[key_golden_cross] = golden_cross_values
df[key_death_cross] = death_cross_values
return df
def compute_ema(df, column_source, column_target_ema, time_periods):
"""
Compute Exponential Moving Average (EMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute EMA (e.g. close price)
:param column_target_ema: prefix of target column in dataframe for EMA results
:param time_periods: list of time periods (number of days for EMA)
:return: modified dataframe
"""
# compute EMA for each time period and add results back to dataframe
for time_period in time_periods:
key_ema = column_target_ema + "-{:d}".format(time_period)
ema_series = df[column_source].ewm(span=time_period, adjust=False).mean()
df[key_ema] = ema_series
return df
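# Note (explanatory, not new behaviour): ewm(span=N, adjust=False) applies the same
# recursion as compute_ema_custom above, ema = (value - ema) * k + ema with smoothing
# factor k = 2 / (N + 1), seeded with the first observation.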
def compute_ma_cross(
df, column_source, column_target_golden_cross, column_target_death_cross,
time_period_fast, time_period_slow):
"""
Compute Moving Average (Golden/Death) Crosses.
:param df: dataframe (sorted in ascending time order)
:param column_source: prefix of source column in dataframe with moving average values
:param column_target_golden_cross: name of target column in dataframe for golden cross results
:param column_target_death_cross: name of target column in dataframe for death cross results
:param time_period_fast: number of days over which to average for fast MA
:param time_period_slow: number of days over which to average for slow MA
:return: modified dataframe
"""
# get moving average values
key_ma_fast = column_source + "-{:d}".format(time_period_fast)
key_ma_slow = column_source + "-{:d}".format(time_period_slow)
fast_series = df[key_ma_fast]
slow_series = df[key_ma_slow]
# compute golden cross / death cross and add results back to dataframe
previous_fast_series = df[key_ma_fast].shift(1)
previous_slow_series = df[key_ma_slow].shift(1)
key_golden_cross = column_target_golden_cross + "-{:d}-{:d}".format(time_period_fast, time_period_slow)
key_death_cross = column_target_death_cross + "-{:d}-{:d}".format(time_period_fast, time_period_slow)
df[key_golden_cross] = (fast_series >= slow_series) & (previous_fast_series <= previous_slow_series)
df[key_death_cross] = (fast_series <= slow_series) & (previous_fast_series >= previous_slow_series)
return df
def compute_bb_custom(df, column_source, column_target_bb, time_period, stdev_factor=2):
"""
Compute Bollinger Bands (BB) With Simple Moving Average (SMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute SMA (e.g. close price)
:param column_target_bb: prefix of target column in dataframe for BB results
:param time_period: number of days over which to average
:param stdev_factor: standard deviation scaling factor for upper and lower bands
:return: modified dataframe
"""
# compute BB
history_values = []
sma_values = []
upper_band_values = []
lower_band_values = []
for value in df[column_source]:
history_values.append(value)
if len(history_values) > time_period:
del (history_values[0])
sma = stats.mean(history_values)
sma_values.append(sma)
variance = 0 # variance is the square of standard deviation
for history_value in history_values:
variance = variance + ((history_value - sma) ** 2)
stdev = math.sqrt(variance / len(history_values)) # use sqrt to get standard deviation
upper_band_values.append(sma + (stdev_factor * stdev))
lower_band_values.append(sma - (stdev_factor * stdev))
# add computed BB results back to dataframe
key_sma = column_target_bb + "-sma-{:d}-{:d}".format(time_period, stdev_factor)
key_upper_band = column_target_bb + "-upper-{:d}-{:d}".format(time_period, stdev_factor)
key_lower_band = column_target_bb + "-lower-{:d}-{:d}".format(time_period, stdev_factor)
df[key_sma] = sma_values
df[key_upper_band] = upper_band_values
df[key_lower_band] = lower_band_values
return df
def compute_bb(df, column_source, column_target_bb, time_period, stdev_factor=2):
"""
Compute Bollinger Bands (BB) With Simple Moving Average (SMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute SMA (e.g. close price)
:param column_target_bb: prefix of target column in dataframe for BB results
:param time_period: number of days over which to average
:param stdev_factor: standard deviation scaling factor for upper and lower bands
:return: modified dataframe
"""
# compute BB and add results back to dataframe
key_sma = column_target_bb + "-sma-{:d}-{:d}".format(time_period, stdev_factor)
key_upper_band = column_target_bb + "-upper-{:d}-{:d}".format(time_period, stdev_factor)
key_lower_band = column_target_bb + "-lower-{:d}-{:d}".format(time_period, stdev_factor)
df[key_sma] = df[column_source].rolling(window=time_period, min_periods=1).mean()
sma_stdev = df[column_source].rolling(window=time_period, min_periods=1).std(ddof=0)
df[key_upper_band] = df[key_sma] + (sma_stdev * stdev_factor)
df[key_lower_band] = df[key_sma] - (sma_stdev * stdev_factor)
return df
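# Illustrative naming (assuming column_target_bb="bb", time_period=20, stdev_factor=2):
# the added columns are "bb-sma-20-2" (20-day SMA), "bb-upper-20-2" = SMA + 2*stdev and
# "bb-lower-20-2" = SMA - 2*stdev, where stdev is the population standard deviation
# (ddof=0) over the same rolling window.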
def compute_macd_custom(
df, column_source, column_target_ema,
column_target_macd, column_target_macd_signal, column_target_macd_histogram,
time_period_fast, time_period_slow, time_period_macd):
"""
Compute Moving Average Convergence Divergence (MACD).
When fast ema crosses above slow ema, it indicates a reversal from downtrend to uptrend.
When fast ema crosses below slow ema, it indicates a reversal from uptrend to downtrend.
When macd crosses above ema_macd (signal), it indicates a reversal from downtrend to uptrend.
When macd crosses below ema_macd (signal), it indicates a reversal from uptrend to downtrend.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute MACD (e.g. close price)
:param column_target_ema: prefix of target column in dataframe for EMA results
:param column_target_macd: name of target column in dataframe for MACD results
:param column_target_macd_signal: name of target column in dataframe for MACD signal results
:param column_target_macd_histogram: name of target column in dataframe for MACD histogram results
:param time_period_fast: number of days over which to average for fast EMA
:param time_period_slow: number of days over which to average for slow EMA
:param time_period_macd: number of days over which to average for MACD EMA
:return: modified dataframe
"""
k_fast = 2 / (time_period_fast + 1) # fast EMA smoothing factor
ema_fast = 0
k_slow = 2 / (time_period_slow + 1) # slow EMA smoothing factor
ema_slow = 0
k_macd = 2 / (time_period_macd + 1) # MACD EMA smoothing factor
ema_macd = 0
ema_fast_values = []
ema_slow_values = []
macd_values = []
macd_signal_values = [] # EMA of MACD values
macd_histogram_values = [] # MACD - MACD-EMA
for value in df[column_source]:
# compute MACD
if ema_fast == 0: # first observation
ema_fast = value
ema_slow = value
else:
ema_fast = (value - ema_fast) * k_fast + ema_fast
ema_slow = (value - ema_slow) * k_slow + ema_slow
ema_fast_values.append(ema_fast)
ema_slow_values.append(ema_slow)
macd = ema_fast - ema_slow
macd_values.append(macd)
# compute MACD signal and histogram
if ema_macd == 0: # first observation
ema_macd = macd
else:
ema_macd = (macd - ema_macd) * k_macd + ema_macd # signal is EMA of MACD values
macd_signal_values.append(ema_macd)
macd_histogram_values.append(macd - ema_macd)
# add computed results back to dataframe
time_fast = str(time_period_fast)
time_slow = str(time_period_slow)
time_fast_slow_macd = time_fast + "-" + time_slow + "-" + str(time_period_macd)
key_ema_fast = column_target_ema + "-" + time_fast
key_ema_slow = column_target_ema + "-" + time_slow
key_macd = column_target_macd + "-" + time_fast_slow_macd
key_macd_signal = column_target_macd_signal + "-" + time_fast_slow_macd
key_macd_histogram = column_target_macd_histogram + "-" + time_fast_slow_macd
df[key_ema_fast] = ema_fast_values
df[key_ema_slow] = ema_slow_values
df[key_macd] = macd_values
df[key_macd_signal] = macd_signal_values
df[key_macd_histogram] = macd_histogram_values
return df
def compute_macd(
df, column_source, column_target_ema,
column_target_macd, column_target_macd_signal, column_target_macd_histogram,
time_period_fast, time_period_slow, time_period_macd):
"""
Compute Moving Average Convergence Divergence (MACD).
When fast ema crosses above slow ema, it indicates a reversal from downtrend to uptrend.
When fast ema crosses below slow ema, it indicates a reversal from uptrend to downtrend.
When macd crosses above ema_macd (signal), it indicates a reversal from downtrend to uptrend.
When macd crosses below ema_macd (signal), it indicates a reversal from uptrend to downtrend.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute MACD (e.g. close price)
:param column_target_ema: prefix of target column in dataframe for EMA results
:param column_target_macd: name of target column in dataframe for MACD results
:param column_target_macd_signal: name of target column in dataframe for MACD signal results
:param column_target_macd_histogram: name of target column in dataframe for MACD histogram results
:param time_period_fast: number of days over which to average for fast EMA
:param time_period_slow: number of days over which to average for slow EMA
:param time_period_macd: number of days over which to average for MACD EMA
:return: modified dataframe
"""
time_fast_slow_macd = "{:d}-{:d}-{:d}".format(time_period_fast, time_period_slow, time_period_macd)
key_ema_fast = column_target_ema + "-{:d}".format(time_period_fast)
key_ema_slow = column_target_ema + "-{:d}".format(time_period_slow)
key_macd = column_target_macd + "-" + time_fast_slow_macd
key_macd_signal = column_target_macd_signal + "-" + time_fast_slow_macd
key_macd_histogram = column_target_macd_histogram + "-" + time_fast_slow_macd
# compute EMA and add results back to dataframe
df[key_ema_fast] = df[column_source].ewm(span=time_period_fast, adjust=False).mean()
df[key_ema_slow] = df[column_source].ewm(span=time_period_slow, adjust=False).mean()
# compute MACD and add results back to dataframe
df[key_macd] = df[key_ema_fast] - df[key_ema_slow]
df[key_macd_signal] = df[key_macd].ewm(span=time_period_macd, adjust=False).mean()
df[key_macd_histogram] = df[key_macd] - df[key_macd_signal]
return df
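# Illustrative parameter choice (an assumption, not enforced here): the common 12/26/9
# setup corresponds to time_period_fast=12, time_period_slow=26, time_period_macd=9,
# i.e. MACD = EMA(12) - EMA(26), signal = 9-day EMA of MACD, histogram = MACD - signal.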
def compute_rsi(df, column_source, column_target_avg_gain, column_target_avg_loss, column_target_rsi, time_periods):
"""
Compute Relative Strength Indicator (RSI).
RSI values over 50% indicate an uptrend, while values below 50% indicate a downtrend.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute RSI (e.g. close price)
:param column_target_avg_gain: name of target column in dataframe for average gain results
:param column_target_avg_loss: name of target column in dataframe for average loss results
:param column_target_rsi: name of target column in dataframe for RSI results
:param time_periods: list ot time periods (in days) over which to look back to compute gains and losses
:return: modified dataframe
"""
# compute RSI over time period and add results back to dataframe
for time_period in time_periods:
gain_history_values = [] # history of gains over look back period (0 if no gain, magnitude of gain if gain)
loss_history_values = [] # history of loss over look back period (0 if no loss, magnitude if loss)
avg_gain_values = []
avg_loss_values = []
rsi_values = []
last_value = 0 # current_value - last_value > 0 ==> gain; current_value - last_value < 0 ==> loss
for value in df[column_source]:
if last_value == 0: # first observation
last_value = value
# compute average gain and loss
gain_history_values.append(max(0, value - last_value))
loss_history_values.append(max(0, last_value - value))
last_value = value
if len(gain_history_values) > time_period: # maximum observations is equal to look back period
del (gain_history_values[0])
del (loss_history_values[0])
avg_gain = stats.mean(gain_history_values) # average gain over look back period
avg_loss = stats.mean(loss_history_values) # average loss over look back period
avg_gain_values.append(avg_gain)
avg_loss_values.append(avg_loss)
# compute RS and RSI
rs = 0
if avg_loss > 0: # to avoid division by 0
rs = avg_gain / avg_loss
rsi = 100 - (100 / (1 + rs))
rsi_values.append(rsi)
# add computed results back to dataframe
key_avg_gain = column_target_avg_gain + "-{:d}".format(time_period)
key_avg_loss = column_target_avg_loss + "-{:d}".format(time_period)
key_rsi = column_target_rsi + "-{:d}".format(time_period)
df[key_avg_gain] = avg_gain_values
df[key_avg_loss] = avg_loss_values
df[key_rsi] = rsi_values
return df
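# Worked example (illustrative): if the average gain over the look-back period is 1.0 and
# the average loss is 0.5, then RS = 1.0 / 0.5 = 2 and RSI = 100 - 100 / (1 + 2) ~= 66.7,
# i.e. above 50 and consistent with an uptrend.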
def compute_change(df, column_source, column_target_change, column_target_change_pc, time_periods):
"""
Compute the change and percentage change of the values in the source column for the specified period in (trading)
days.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute change (e.g. close price)
:param column_target_change: name of target column in dataframe for change to add to dataframe
:param column_target_change_pc: name of target column in dataframe for change pc to add to dataframe
:param time_periods: list of time periods in (trading) days
:return: modified dataframe
"""
# compute change over time period and add result back to dataframe
for time_period in time_periods:
key_change = column_target_change + "-{:d}".format(time_period)
key_change_pc = column_target_change_pc + "-{:d}".format(time_period)
#df2 = df[column_source].asfreq("D", method="ffill")
#change_series = df2.diff(time_period)
#change_pc_series = df2.pct_change(time_period)
#df[key_change] = change_series
#df[key_change_pc] = change_pc_series
change_series = df[column_source].diff(time_period)
change_pc_series = df[column_source].pct_change(time_period)
df[key_change] = change_series
df[key_change_pc] = change_pc_series
return df
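# Illustrative behaviour (assuming column_target_change="change" and
# column_target_change_pc="change_pc"): for a source series [100., 102., 99.] and
# time_periods=[1], "change-1" is [NaN, 2.0, -3.0] (diff) and "change_pc-1" is
# [NaN, 0.02, -0.0294...] (pct_change).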
def compute_daily_change_between_current_and_previous(
df, column_source_current, column_source_previous,
column_target_daily_change, column_target_daily_change_pc):
"""
Compute the daily change and daily percentage change between the current and previous values.
:param df: dataframe (sorted in ascending time order)
:param column_source_current: name of source column in dataframe with current values to compute (e.g. current open price)
:param column_source_previous: name of source column in dataframe with previous values to compute (e.g. previous close price)
:param column_target_daily_change: name of target column in dataframe for daily change to add to dataframe
:param column_target_daily_change_pc: name of target column in dataframe for daily change pc to add to dataframe
:return: modified dataframe
"""
    # change between the current value (e.g. today's open) and the previous period's value (e.g. yesterday's close)
    daily_change = df[column_source_current] - df[column_source_previous].shift(1)
    # express that change relative to the previous period's value
    daily_change_pc = daily_change / df[column_source_previous].shift(1)
# add computed results back to dataframe
df = pd.concat([df, daily_change.rename(column_target_daily_change)], axis=1)
df = pd.concat([df, daily_change_pc.rename(column_target_daily_change_pc)], axis=1)
return df
def compute_52_week_range(df, column_source_low, column_source_high, column_target_low, column_target_high):
"""
Compute 52 Week Range (Low~High).
:param df: dataframe (sorted in ascending time order)
:param column_source_low: name of source column in dataframe with low values to compute
:param column_source_high: name of source column in dataframe with high values to compute
:param column_target_low: name of target column in dataframe for low range results to add to dataframe
:param column_target_high: name of target column in dataframe for high range results to add to dataframe
:return: modified dataframe
"""
# compute rolling 52 week range and add result back to dataframe
    df[column_target_low] = df[column_source_low].asfreq("D").rolling(window=52 * 7, min_periods=1).min()
    df[column_target_high] = df[column_source_high].asfreq("D").rolling(window=52 * 7, min_periods=1).max()
return df
def compute_change_pc_above(df, column_source1, column_source2, column_target, column_target_pc):
"""
Compute the percentage of source1 above source2 (e.g. close price above the 52 week low price).
:param df: dataframe (sorted in ascending time order)
:param column_source1: name of source1 column in dataframe with values to compute (e.g. close price)
:param column_source2: name of source2 column in dataframe with values to compute (e.g. 52 week low price)
:param column_target: name of target column in dataframe for change results to add to dataframe
:param column_target_pc: name of target column in dataframe for percentage change results to add to dataframe
:return: modified dataframe
"""
change_above = df[column_source1] - df[column_source2]
pc_above = (df[column_source1] / df[column_source2]) - 1
# add computed results back to dataframe
df = pd.concat([df, change_above.rename(column_target)], axis=1)
df = pd.concat([df, pc_above.rename(column_target_pc)], axis=1)
return df
def compute_change_pc_below(df, column_source1, column_source2, column_target, column_target_pc):
"""
Compute the percentage of source1 below source2 (e.g. close price below the 52 week high price).
:param df: dataframe (sorted in ascending time order)
:param column_source1: name of source1 column in dataframe with values to compute (e.g. close price)
:param column_source2: name of source2 column in dataframe with values to compute (e.g. 52 week high price)
:param column_target: name of target column in dataframe for change results to add to dataframe
:param column_target_pc: name of target column in dataframe for percentage change results to add to dataframe
:return: modified dataframe
"""
change_below = df[column_source2] - df[column_source1]
pc_below = 1 - (df[column_source1] / df[column_source2])
# add computed results back to dataframe
df = pd.concat([df, change_below.rename(column_target)], axis=1)
df = pd.concat([df, pc_below.rename(column_target_pc)], axis=1)
return df
def compute_sharpe(df, column_source, column_target, N=252):
"""
Compute the sharpe ratio of the (e.g. daily) return values in the source column.
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute sharpe ratio (e.g. daily returns)
:param column_target: name of target column in dataframe for sharpe ratio to add to dataframe
:param N: number of trading periods (e.g. 252 = daily, 12 = monthly)
:return: modified dataframe
"""
# compute the sharpe ratio and add result back to dataframe
return_series = df[column_source]
df[column_target] = np.sqrt(N) * return_series.mean() / return_series.std()
return df
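# --- Added illustrative sketch (not part of the original module) ---
# Minimal usage example for compute_sharpe; the "daily_return" column and its values are
# assumptions. Note that every row receives the same annualized scalar ratio.
def _example_compute_sharpe():
    import pandas as pd
    demo = pd.DataFrame({"daily_return": [0.01, -0.005, 0.007, 0.002]})
    demo = compute_sharpe(demo, "daily_return", "sharpe", N=252)
    # sharpe = sqrt(252) * mean(daily_return) / std(daily_return), broadcast to the whole column
    return demo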
def compute_cumulative_total_return(df, column_price):
"""
Cumulative return on an investment is the aggregate amount that the investment has gained or lost over time,
independent of the amount of time involved.
cumulative total return = (price_end - price_start) / price_start = (price_end/price_start) - 1
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute cumulative total return
:return: cumulative total return
"""
# compute cumulative total return
    price_start = df[column_price].iloc[0]
    price_end = df[column_price].iloc[-1]
cumulative_return = (price_end - price_start)/price_start
return cumulative_return
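# --- Added illustrative sketch (not part of the original module) ---
# Worked example of the cumulative total return formula; the prices are assumptions.
def _example_cumulative_total_return():
    import pandas as pd
    demo = pd.DataFrame({"adj_close": [100.0, 110.0, 121.0]})
    # (121 - 100) / 100 = 0.21, i.e. a 21% cumulative return
    return compute_cumulative_total_return(demo, "adj_close")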
def compute_annualized_total_return_over_years(df, column_price, years):
"""
    Compute the annualized total return over the specified number of years.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:param years: time period in years (e.g. 1 = 1 year, 2 = 2 years, 2.5 = 1 year and 6 months, etc.)
:return: annualized total return over years
"""
# compute cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# compute annualized total returns over months
annualized_total_return = ((1 + total_return)**(1/years)) - 1
return annualized_total_return
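# --- Added illustrative sketch (not part of the original module) ---
# Worked example of the CAGR formula: a 21% cumulative return earned over 2 years
# annualizes to (1.21 ** (1/2)) - 1 = 0.10, i.e. 10% per year. The prices are assumptions.
def _example_annualized_return_over_years():
    import pandas as pd
    demo = pd.DataFrame({"adj_close": [100.0, 121.0]})
    return compute_annualized_total_return_over_years(demo, "adj_close", years=2)  # -> ~0.10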
def compute_annualized_total_return_over_months(df, column_price, months):
"""
    Compute the annualized total return over the specified number of months.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:param months: time period in months (e.g. 1 = 1 month, 2 = 2 months, 2.5 = 1 month and ~15 days, etc.)
:return: annualized total return over months
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# calculate annualized total returns over months
annualized_total_return = ((1 + total_return)**(12/months)) - 1
return annualized_total_return
def compute_annualized_total_return_over_calendar_days(df, column_price):
"""
    Compute the annualized total return over the provided number of calendar days.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: Using days (versus years or months) provides the most precise form of annualized return calculation.
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:return: annualized total return over days
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# fill in missing calendar days
index_filled = pd.date_range(min(df.index), max(df.index))
df_filled = df.reindex(index_filled, method="ffill")
# number of calendar days in data
# note: dataframe includes one day before the desired range; for example, if we want to get the annualized total
# return from 4/1/2000 to 3/31/2002, the dataframe will contain data from 3/31/2000 to 3/31/2002; as a result,
# the number of calendar days is (len(df) - 1)
calendar_days = len(df_filled) - 1
# calculate annualized total returns over days
annualized_total_return = ((1 + total_return)**(c.CALENDAR_DAYS/calendar_days)) - 1
return annualized_total_return
def compute_annualized_total_return_over_trading_days(df, column_price):
"""
    Compute the (trailing) annualized total return over the provided number of trading days.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: Using days (versus years or months) provides the most precise form of annualized return calculation.
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:return: annualized total return over days
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# number of trading days in data
# note: dataframe includes one day before the desired range; for example, if we want to get the annualized total
# return from 4/1/2000 to 3/31/2002, the dataframe will contain data from 3/31/2000 to 3/31/2002; as a result,
# the number of trading days is (len(df) - 1)
trading_days = len(df) - 1
# calculate annualized total returns over number of trading days
annualized_total_return = ((1 + total_return)**(c.TRADING_DAYS_YEAR/trading_days)) - 1
return annualized_total_return
def compute_trailing_returns(df, symbol):
dict_returns = {"symbol": symbol}
# compute total return (trailing 1 month)
end = df.index[-1]
start = end - pd.DateOffset(months=1)
dict_returns[c.TRAILING_RETURN + "-1m"] = compute_cumulative_total_return(df.loc[start:end], c.CLOSE)
# compute total return (trailing 3 month)
end = df.index[-1]
    start = end - pd.DateOffset(months=3)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - Pending Home Sales (MoM)
def macro_usa_phs():
"""
未决房屋销售月率
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: 未决房屋销售月率
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
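# Illustrative usage (added; not part of the original module). Calling the function fetches
# live data from Eastmoney, so the concrete values depend on the data source at call time;
# only the column layout below follows from the code above:
# >>> df = macro_usa_phs()
# >>> df.columns.tolist()
# ['时间', '前值', '现值', '发布日期']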
# Jin10 Data Center - Economic Indicators - United States - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
"""
美国国内生产总值(GDP)报告, 数据区间从20080228-至今
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
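# Illustrative usage (added; not part of the original module). The function returns a float
# Series named "gdp" indexed by release date; it performs live HTTP requests to jin10.com,
# so the actual values depend on the data source at call time:
# >>> gdp = macro_usa_gdp_monthly()
# >>> gdp.tail()  # most recent annualized quarterly GDP growth readings, in %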
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US CPI MoM Report
def macro_usa_cpi_monthly():
"""
美国CPI月率报告, 数据区间从19700101-至今
https://datacenter.jin10.com/reportType/dc_usa_cpi
https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
:return: 美国CPI月率报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US Core CPI MoM Report
def macro_usa_core_cpi_monthly():
"""
美国核心CPI月率报告, 数据区间从19700101-至今
https://datacenter.jin10.com/reportType/dc_usa_core_cpi
https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
:return: 美国核心CPI月率报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US Personal Spending MoM Report
def macro_usa_personal_spending():
"""
美国个人支出月率报告, 数据区间从19700101-至今
https://datacenter.jin10.com/reportType/dc_usa_personal_spending
https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
:return: 美国个人支出月率报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US Retail Sales MoM Report
def macro_usa_retail_sales():
"""
美国零售销售月率报告, 数据区间从19920301-至今
https://datacenter.jin10.com/reportType/dc_usa_retail_sales
https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
:return: 美国零售销售月率报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US Import Price Index Report
def macro_usa_import_price():
"""
美国进口物价指数报告, 数据区间从19890201-至今
https://datacenter.jin10.com/reportType/dc_usa_import_price
https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
:return: 美国进口物价指数报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US Export Price Index Report
def macro_usa_export_price():
"""
美国出口价格指数报告, 数据区间从19890201-至今
https://datacenter.jin10.com/reportType/dc_usa_export_price
https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
:return: 美国出口价格指数报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Labor Market - LMCI
def macro_usa_lmci():
"""
美联储劳动力市场状况指数报告, 数据区间从20141006-至今
https://datacenter.jin10.com/reportType/dc_usa_lmci
https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
:return: 美联储劳动力市场状况指数报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Labor Market - Unemployment Rate - US Unemployment Rate Report
def macro_usa_unemployment_rate():
"""
美国失业率报告, 数据区间从19700101-至今
https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
:return: 获取美国失业率报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
# -*- coding: utf-8 -*-
"""
Common utilities for univariate analysis, grouped into the following tools:
1. Automatic binning (coarse classing) module: chi-square binning and Best-KS binning
2. Basic analysis module: univariate analysis tools, WOE encoding tools, and an analysis report for all variables
3. Plotting tools for univariate analysis, e.g. AUC, KS and distribution-related charts
"""
# Author: <NAME>
import numpy as np
import pandas as pd
from abc import abstractmethod
from abc import ABCMeta
from sklearn.utils.multiclass import type_of_target
from pandas.api.types import is_numeric_dtype
import warnings
import time
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows
class Kit(object):
"""
常用工具类
"""
def __init__(self, positive_label=1, negative_label=0):
self.positive_label = positive_label
self.negative_label = negative_label
pass
def cond_insert_ind(self, cond, ind):
"""
为分箱结果中添加需要独立分箱的部分
cond : 待处理的分箱结果
arr : 需要独立分箱的数据集合,不管是数值型还是非数值型变量cond,请使用list添加
"""
if isinstance(cond, list):
cond = list(set(cond + ind))
cond.sort()
else:
n = len(ind)
arr = list(set(ind).difference(set(cond.keys())))
for k, v in cond.items():
cond[k] = v + n
for i in range(len(arr)):
cond[arr[i]] = i
return cond
def make_bin(self, df, var_name, cond, precision=3):
"""
基于cond中的分箱条件,为df中var_name的变量匹配对应的分箱值
"""
if isinstance(cond, list):
df["bin"] = pd.cut(df[var_name], cond, duplicates='drop', precision=precision)
elif isinstance(cond, dict):
mapping = pd.Series(cond).reset_index().rename({"index": var_name, 0: "bin"}, axis=1)
df = df[[var_name]].merge(mapping, on=var_name, how='left').set_index(df[[var_name]].index)
else:
raise ValueError("参数cond的类型只能为list或者dict")
return df["bin"]
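    # Illustrative usage (added; not part of the original class). The column name "age"
    # and the cut points below are assumptions:
    # >>> kit = Kit()
    # >>> demo = pd.DataFrame({"age": [5, 15, 25]})
    # >>> kit.make_bin(demo, "age", cond=[0, 10, 20, 30])
    # 0     (0, 10]
    # 1    (10, 20]
    # 2    (20, 30]
    # Name: bin, dtype: category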
def woe_code(self, df, var_name, woeDict):
"""
对样本的数据进行woe编码,返回完成编码后的
"""
if isinstance(df[var_name].dtype, pd.core.dtypes.dtypes.CategoricalDtype):
mapping = pd.Series(woeDict).reset_index().rename({"index": var_name, 0: "woe"}, axis=1)
breaks = mapping[var_name].to_list()
breaks.insert(0, -np.inf)
mapping[var_name] = pd.cut(mapping[var_name], breaks, duplicates="drop")
else:
mapping = pd.Series(woeDict).reset_index().rename({"index": var_name, 0: "woe"}, axis=1)
df = df.merge(mapping, on=var_name, how='left').set_index(df.index)
return df["woe"]
def univerate(self, df, var_name, target, lamb=0.001, retWoeDict=False):
"""
单变量分析函数,目前支持的计算指标为 IV,KS,LIFT
建议用于编码后的数值型变量进行分析,若在前面使用了cond_insert方法调整了cond
"""
# dti = pd.crosstab(df[var_name], df[target])
dti = df.groupby([var_name, target])[target].count().unstack().fillna(0)
dti.rename({self.positive_label: "positive", self.negative_label: "negative"}, axis=1, inplace=True)
dti["positive"] = dti["positive"].astype(int)
dti["negative"] = dti["negative"].astype(int)
p_t = dti["positive"].sum()
n_t = dti["negative"].sum()
t_t = p_t + n_t
r_t = p_t / t_t
dti["total"] = dti["positive"] + dti["negative"]
dti["total_rate"] = dti["total"] / t_t
dti["positive_rate"] = dti["positive"] / dti["total"] # (rs["positive"] + rs["negative"])
dti["negative_cum"] = dti["negative"].cumsum()
dti["positive_cum"] = dti["positive"].cumsum()
dti["woe"] = np.log(((dti["negative"] / n_t) + lamb) / ((dti["positive"] / p_t) + lamb))
dti["LIFT"] = dti["positive_rate"] / r_t
dti["KS"] = np.abs((dti["positive_cum"] / p_t) - (dti["negative_cum"] / n_t))
dti["IV"] = (dti["negative"] / n_t - dti["positive"] / p_t) * dti['woe']
IV = dti["IV"].sum()
KS = dti["KS"].max()
dti["IV"] = IV
dti["KS"] = KS
dti = dti.reset_index()
dti.columns.name = None
dti.rename({"Total": "num", var_name: "bin"}, axis=1, inplace=True)
dti.insert(0, "target", [target] * dti.shape[0])
dti.insert(0, "var", [var_name] * dti.shape[0])
if retWoeDict:
if isinstance(dti["bin"].dtype, pd.core.dtypes.dtypes.CategoricalDtype):
dti["v"] = dti["bin"].map(lambda x: x.right)
else:
dti["v"] = dti["bin"]
woeDict = pd.Series(dti["woe"].values, index=dti["v"].values).to_dict()
# # 修正根据分箱后,空分组,对应的woe值
# if cond0 is not None:
# right0 = set(cond0[1:])
# right1 = set(woeDict.keys())
# for key in right0.difference(right1):
# woeDict[key] = 0
dti.drop(columns=["negative_cum", "positive_cum", "v"], inplace=True)
return dti, woeDict
dti.drop(columns=["negative_cum", "positive_cum"], inplace=True)
return dti
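    # Worked example of the WOE/IV arithmetic used above (added; not part of the original
    # class). The counts are assumptions: for a bin holding 100 of 800 negatives and
    # 10 of 200 positives,
    #   woe    = ln((100/800) / (10/200)) = ln(2.5) ≈ 0.916
    #   IV_bin = (100/800 - 10/200) * woe ≈ 0.075 * 0.916 ≈ 0.069
    # and the variable's IV is the sum of IV_bin over all bins (the lamb term above only
    # guards against empty cells).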
def is_numeric(self, series):
"""
判断变量是否为数值型变量
"""
return is_numeric_dtype(series)
def missing_count(self, series):
"""
计算变量缺失率
"""
missing_index = pd.isna(series)
return missing_index.sum()
def unique_count(self, series):
"""
计算变量的枚举值数量
"""
unique_arr = pd.unique(series)
return unique_arr.size
def csi(self, base, df, var_name):
"""
计算不同数据集之间,同一个变量csi
"""
count1 = base.groupby(var_name)[var_name].count()
count2 = df.groupby(var_name)[var_name].count()
t1 = count1.sum()
t2 = count2.sum()
c1 = count1 / t1
c2 = count2 / t2
csi = (c1 - c2) * np.log(c1 / c2)
return csi.sum()
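    # Worked example of the CSI/PSI term computed above (added; not part of the original
    # class). The shares are assumptions: if a bucket holds 30% of the base sample and
    # 25% of the comparison sample, its contribution is
    #   (0.30 - 0.25) * ln(0.30 / 0.25) ≈ 0.05 * 0.182 ≈ 0.009
    # and csi() returns the sum of these contributions over all buckets.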
def group_rs(self, data, group, sum_col=[], count_col=[], rate_tupes=[]):
"""
业务分析工具类,同时对比计算多个target指标,查看结果
data : 数据集
sum_col : 需要group_sum的列
count_col : 需要group_count的列
rate_tupe : 需要除法计算的列 格式为 (字段1,字段2,新列名称) 或者 (字段,新列名称)
"""
grouped = data.groupby(group)
grouped_count = grouped[count_col].count()
grouped_sum = grouped[sum_col].sum()
grouped = pd.concat([grouped_count, grouped_sum], axis=1)
for tup in rate_tupes:
size = len(tup)
if size == 3:
grouped[tup[2]] = grouped[tup[0]] / grouped[tup[1]]
if size == 2:
grouped[tup[1]] = grouped[tup[0]] / grouped[tup[0]].sum()
return grouped.reset_index()
def batch_fillna(self, df, var_list, num_fill=-1, cate_fill="NA", suffix="_new"):
"""
批量填充缺失值
"""
for var_name in var_list:
var_name_new = var_name + suffix
if self.is_numeric(df[var_name]):
df[var_name_new] = df[var_name].fillna(num_fill)
else:
df[var_name_new] = df[var_name].fillna(cate_fill)
return df
def varlist_suffix(self, var_list, suffix):
return [var_name + suffix for var_name in var_list]
def feature_engine(self, datas, var_list, target, discretize, max_bin=6, precision=4, num_fill=-1, cate_fill="NA",
num_ind=None, cate_ind=None, fill_suffix="_fill", bin_suffix="_bin", woe_suffix="_woe",
path=None):
"""
批量对数据集进行自动化分箱和编码
Parameters
----------
datas: 数据集,为dataframe的list,第一个数据集为训练集
var_list: 特征列表
target : 目标值
discretize : 分箱工具类
max_bin : 最大分箱数
num_fill : 数值型变量填充结果
cate_fill : 类别型变量填充结果
num_ind : 数值型变量中,需要独立插入的分箱 为 list
cate_ind : 字符型变量中,需要独立进行分箱的值 为 list
fill_suffix : 处理确实
bin_suffix : 分箱后生成对应分箱的后缀
woe_suffix : woe编码后的编码的后缀
retInfoDict : 返回分箱后的变量信息,采用嵌套的dict格式,单个变量的相关信息如下:
变量名 : { "cond" , "woeDict" }
"""
assert len(datas) >= 1, "至少需要一个数据集"
train = datas[0]
        all_data = pd.concat(datas, axis=1)
import pandas as pd
import sqlite3
import sys
import datetime
_db_path = r'Database\Database.db'
class GetData(object):
@staticmethod
def Equity(Ticker, Start=None, End = None):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
if (Start == None) & (End == None):
cursor.execute("SELECT Date, Open, High, Low, Close, Volume, Dividends, AdjClose FROM Equity WHERE Ticker = ?",(Ticker,))
elif End == None:
cursor.execute("SELECT Date, Open, High, Low, Close, Volume, Dividends, AdjClose FROM Equity WHERE Ticker = ? AND Date >= ?",(Ticker, Start))
elif Start == None:
cursor.execute("SELECT Date, Open, High, Low, Close, Volume, Dividends, AdjClose FROM Equity WHERE Ticker = ? AND Date <= ?",(Ticker, End))
elif (Start != None) & (End != None):
cursor.execute("SELECT Date, Open, High, Low, Close, Volume, Dividends, AdjClose FROM Equity WHERE Ticker = ? AND Date BETWEEN ? AND ?",(Ticker, Start, End))
else:
raise Exception("Error in getting data from db(Equity).")
data = cursor.fetchall()
connection.close()
Date_list = []
Open_list = []
High_list = []
Low_list = []
Close_list = []
Volume_list = []
Dividends_list = []
AdjClose_list = []
for row in data:
Date_list.append(row[0])
Open_list.append(row[1])
High_list.append(row[2])
Low_list.append(row[3])
Close_list.append(row[4])
Volume_list.append(row[5])
Dividends_list.append(row[6])
AdjClose_list.append(row[7])
df = pd.DataFrame(index = Date_list, columns = ["Open","High","Low","Close","Volume","Dividends","AdjClose"])
df.index = pd.to_datetime(df.index)
df['Open'] = Open_list
df['High'] = High_list
df['Low'] = Low_list
df['Close'] = Close_list
df['Volume'] = Volume_list
df['Dividends'] = Dividends_list
df['AdjClose'] = AdjClose_list
        df = df.sort_index()
df.index.name = 'Date'
return df
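    # Illustrative usage (added; not part of the original class). Requires the local SQLite
    # database referenced by _db_path; the ticker and dates below are assumptions:
    # >>> df = GetData.Equity("AAPL", Start="2020-01-01", End="2020-12-31")
    # >>> df.columns.tolist()
    # ['Open', 'High', 'Low', 'Close', 'Volume', 'Dividends', 'AdjClose']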
@staticmethod
def Get_EquityName(Ticker):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("SELECT Name FROM EquityList WHERE Ticker = ?",(Ticker,))
data = cursor.fetchall()
connection.close()
return data[0][0]
@staticmethod
def Multi_Equity(Tickers, Start = None, End = None):
        # fill in whichever bound is missing; supplying both bounds is also valid
        if Start is None:
            Start = '1980-01-01'
        if End is None:
            End = datetime.datetime.today().strftime('%Y-%m-%d')
df = pd.DataFrame(columns = Tickers, index = pd.date_range(start = Start, end = End, freq = 'D'))
for Ticker in Tickers:
df[Ticker] = GetData.Equity(Ticker, Start, End)['AdjClose']
return df.dropna(how = 'all')
@staticmethod
def Futures(Ticker, Start=None, End = None):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
if (Start == None) & (End == None):
cursor.execute("SELECT Date, Open, High, Low, Settle, Volume, OI FROM Futures WHERE Ticker = ?",(Ticker,))
elif End == None:
cursor.execute("SELECT Date, Open, High, Low, Settle, Volume, OI FROM Futures WHERE Ticker = ? AND Date >= ?",(Ticker, Start))
elif Start == None:
cursor.execute("SELECT Date, Open, High, Low, Settle, Volume, OI FROM Futures WHERE Ticker = ? AND Date <= ?",(Ticker, End))
elif (Start != None) & (End != None):
cursor.execute("SELECT Date, Open, High, Low, Settle, Volume, OI FROM Futures WHERE Ticker = ? AND Date BETWEEN ? AND ?",(Ticker, Start, End))
else:
raise Exception("Error in getting data from db(Futures).")
data = cursor.fetchall()
connection.close()
Date_list = []
Open_list = []
High_list = []
Low_list = []
Settle_list = []
Volume_list = []
OI_list = []
for row in data:
Date_list.append(row[0])
Open_list.append(row[1])
High_list.append(row[2])
Low_list.append(row[3])
Settle_list.append(row[4])
Volume_list.append(row[5])
OI_list.append(row[6])
df = pd.DataFrame(index = Date_list, columns = ["Open","High","Low","Settle","Volume","OI"])
df.index = pd.to_datetime(df.index)
df['Open'] = Open_list
df['High'] = High_list
df['Low'] = Low_list
df['Settle'] = Settle_list
df['Volume'] = Volume_list
df['OI'] = OI_list
        df = df.sort_index()
return df
@staticmethod
def Macro(Ticker, Start=None, End = None):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
if (Start == None) & (End == None):
cursor.execute("SELECT Date,Value FROM Futures WHERE Ticker = ?",(Ticker,))
elif End == None:
cursor.execute("SELECT Date,Value FROM Futures WHERE Ticker = ? AND Date >= ?",(Ticker, Start))
elif Start == None:
cursor.execute("SELECT Date,Value FROM Futures WHERE Ticker = ? AND Date <= ?",(Ticker, End))
elif (Start != None) & (End != None):
cursor.execute("SELECT Date,Value FROM Futures WHERE Ticker = ? AND Date BETWEEN ? AND ?",(Ticker, Start, End))
else:
raise Exception("Error in getting data from db(Macro).")
data = cursor.fetchall()
connection.close()
Date_list = []
Value_list = []
for row in data:
Date_list.append(row[0])
Value_list.append(row[1])
df = pd.DataFrame(index = Date_list, columns = ["Value"])
df.index = pd.to_datetime(df.index)
df['Value'] = Value_list
        df = df.sort_index()
return df
@staticmethod
def Summary_table():
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("SELECT * FROM Summary_table")
data = cursor.fetchall()
connection.close()
Category_list = []
Ticker_list = []
Start_list = []
End_list = []
Count_list = []
for row in data:
Category_list.append(row[0])
Ticker_list.append(row[1])
Start_list.append(row[2])
End_list.append(row[3])
Count_list.append(row[4])
df = pd.DataFrame(index = range(len(Ticker_list)),columns = ['Category','Ticker','Start','End','Count'])
df['Category'] = Category_list
df['Ticker'] = Ticker_list
df['Start'] = Start_list
df['End'] = End_list
df['Count'] = Count_list
return df
class InsertData(object):
@staticmethod
def Equity(Ticker,df):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
NumberOfData = len(df)
for i in range(NumberOfData):
Date = str(df.index[i])[:10]
Open = df['Open'][i]
High = df['High'][i]
Low = df['Low'][i]
Close = df['Close'][i]
Volume = df['Volume'][i]
Dividends = df['Dividends'][i]
AdjClose = df['AdjClose'][i]
LastUpdated = df['LastUpdated'][i]
cursor.execute("INSERT OR REPLACE INTO Equity VALUES (?,?,?,?,?,?,?,?,?,?)",(Ticker,Date, float(Open), float(High), float(Low), float(Close), int(Volume), str(Dividends), float(AdjClose), str(LastUpdated)))
sys.stderr.write("[info] wrote %s rows to db. \n" % (len(df)))
connection.commit()
connection.close()
sys.stderr.write("[info] %s data created successfully.\n" % (Ticker))
@staticmethod
def Futures(Ticker,df):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
NumberOfData = len(df)
for i in range(NumberOfData):
Date = str(df.index[i])[:10]
Open = df['Open'][i]
High = df['High'][i]
Low = df['Low'][i]
Settle = df['Settle'][i]
Volume = df['Volume'][i]
OI = df['OI'][i]
cursor.execute("INSERT OR REPLACE INTO Futures VALUES (?,?,?,?,?,?,?,?)",(Ticker,Date, Open, High, Low, Settle, Volume, OI))
sys.stderr.write("[info] wrote %s rows to db. \n" % (len(df)))
connection.commit()
connection.close()
sys.stderr.write("[info] %s data created successfully.\n" % (Ticker))
@staticmethod
def Macro(Ticker,df):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
NumberOfData = len(df)
for i in range(NumberOfData):
Date = str(df.index[i])[:10]
Value = df['Value'][i]
cursor.execute("INSERT OR REPLACE INTO Macro VALUES (?,?,?)",(Ticker,Date, Value))
sys.stderr.write("[info] wrote %s rows to db. \n" % (len(df)))
connection.commit()
connection.close()
sys.stderr.write("[info] %s data created successfully.\n" % (Ticker))
class DeleteData(object):
@staticmethod
def Equity(Ticker):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("DELETE FROM Equity WHERE Ticker = ?",(Ticker,))
connection.commit()
connection.close()
@staticmethod
def Futures(Ticker):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("DELETE FROM Futures WHERE Ticker = ?",(Ticker,))
connection.commit()
connection.close()
@staticmethod
def Macro(Ticker):
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("DELETE FROM Macro WHERE Ticker = ?",(Ticker,))
connection.commit()
connection.close()
@staticmethod
def SummaryData():
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("DELETE FROM Summary_table")
connection.commit()
connection.close()
class SummaryData(object):
@staticmethod
def Equity():
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("SELECT Ticker, MIN(Date), MAX(Date), COUNT(Ticker) FROM Equity GROUP BY Ticker")
data = cursor.fetchall()
for each in data:
cursor.execute("INSERT OR REPLACE INTO Summary_table VALUES (?,?,?,?,?)",('Equity',each[0],each[1],each[2],each[3]))
connection.commit()
connection.close()
@staticmethod
def Futures():
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("SELECT Ticker, MIN(Date), MAX(Date), COUNT(Ticker) FROM Futures GROUP BY Ticker")
data = cursor.fetchall()
for each in data:
cursor.execute("INSERT OR REPLACE INTO Summary_table VALUES (?,?,?,?,?)",('Futures',each[0],each[1],each[2],each[3]))
connection.commit()
connection.close()
@staticmethod
def Macro():
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
cursor.execute("SELECT Ticker, MIN(Date), MAX(Date), COUNT(Ticker) FROM Macro GROUP BY Ticker")
data = cursor.fetchall()
for each in data:
cursor.execute("INSERT OR REPLACE INTO Summary_table VALUES (?,?,?,?,?)",('Macro',each[0],each[1],each[2],each[3]))
connection.commit()
connection.close()
class Metadata(object):
@staticmethod
def Insert_FRED():
path = 'metadata/FRED_metadata.csv'
data = pd.read_csv(path)
connection = sqlite3.connect(_db_path)
cursor = connection.cursor()
for i in range(len(data)):
cursor.execute("INSERT INTO FRED_metadata VALUES (?,?,?,?,?,?)",(data.ix[i][0],data.ix[i][1],data.ix[i][2],data.ix[i][3],data.ix[i][4],data.ix[i][5]))
connection.commit()
connection.close()
@staticmethod
def Insert_CHRIS():
path = 'metadata/CHRIS_metadata.csv'
        data = pd.read_csv(path)
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import exists
from pathlib import Path
from scipy.constants import value
def change_width(ax, new_value) :
for patch in ax.patches :
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def load_consistency_result(filename):
data = pd.read_csv(filename, header=None)
data = data.iloc[:, 0:2].copy()
#print(data)
data.columns = ["Ontology", "IsConsitency"]
return data
def load_evaluation_csv(file_name):
columns = ["Ontology", "Run1", "Run2", "Run3", "Run4", "Run5", "Run6", "Run7", "Run8", "Run9", "Run10", "Mean",
"Median"]
data = pd.read_csv(file_name, header=None, names=columns)
total = data.shape[0]
isNumeric = data.iloc[:, 1:11].copy()
isNumeric = isNumeric.apply(lambda s: pd.to_numeric(s, errors='coerce').notnull().all(), axis=1)
isNumeric = isNumeric.index[isNumeric].tolist()
timeout = data.applymap(lambda x: 'timeout' in str(x).lower() ).any(axis=1)
timeout = data[timeout].copy()
time_count = timeout.shape[0]
#data = data.apply(lambda x: "Timeout" if 'timeout' in str(x).lower() and x.name in ["Mean"] else x, axis=1)
inconstent = data.applymap(lambda x: 'inconsistentontology' in str(x).lower()).any(axis=1)
inconstent = data[inconstent].copy()
inconstent_count = inconstent.shape[0]
#data = data.apply(lambda x: "Inconsistent Error" if 'inconsistentontology' in str(x).lower() and x.name in ["Mean"] else x, axis=1)
mem = data.applymap(lambda x: 'outofmemory' in str(x).lower()).any(axis=1)
mem = data[mem].copy()
mem_count = mem.shape[0]
#data = data.apply(lambda x: "Inconsistent Error" if 'outofmemory' in str(x).lower() and x.name in ["Mean"] else x, axis=1)
#data = data.apply(lambda x: "Other Error" if np.isnan(x["Mean"] and x.name in ["Mean"]) else x, axis=1)
new_column = data.apply(lambda x: x["Run1"] if np.isnan(x["Mean"]) else x["Mean"], axis=1)
new_column = new_column.apply(lambda x: "OutOfMemeory" if 'outofmemory' in str( x).lower() else x)
new_column = new_column.apply(lambda x: "Inconsistent Error" if 'inconsistentontology' in str(x).lower() else x)
new_column = new_column.apply(lambda x: "Timeout" if 'timeout' in str(x).lower() else x)
new_column = new_column.apply(lambda x: "Other Error" if 'timeout' not in str(x).lower() and 'inconsistentontology' not in str(x).lower() and 'outofmemory' not in str(x).lower() and not np.isreal(x) else x)
data["Mean"] = new_column
new_data = data.iloc[isNumeric].copy()
return new_data, data, total, time_count, inconstent_count, mem_count
if __name__ == '__main__':
#stat_csv_file = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\ORE2015_Statistics.csv"
stat_realization_csv_file = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\ORE2015_Realization_Ontology_Statistics.csv"
#input_folder: str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\ORE2015\\"
# output_folder = "./output/"
#ore2015=True
stat_csv_file = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\BioOntology_Statistics.csv"
input_folder: str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\Bio\\"
output_folder = "./output/Bio/"
ore2015 = False
Path(output_folder).mkdir(parents=True, exist_ok=True)
reasoner_name = ["Factpp", "HermiT", "JFact", "Konclude", "Openllet", "Pellet", "KoncludeCLI"]
task_name = [ "Loading", "Consistency", "Classification", "Realization"]
ontology_loading_file = input_folder + "Ontology_Loading.csv"
t_ontology_loading, old_data, total, timeout, inconsistent_error_count, mem = load_evaluation_csv(ontology_loading_file)
ontology_loading = t_ontology_loading[["Ontology", "Mean"]].copy()
ontology_loading.columns = ["Ontology", "Ontology Loading"]
stats = pd.read_csv(stat_csv_file)
if ore2015:
        stats_realization = pd.read_csv(stat_realization_csv_file)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not strings are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, the string '999.0' can't be matched
# exactly, so float matching is needed
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
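# f builds one CSV row of nv fields, placing the na-value v in column i and leaving the rest empty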
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), and
# header=1 should then pick the second remaining line ('A,B,C')
# as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
# Merges two CSV files and saves the final result
import pandas as pd
import sys
df1 = pd.read_csv(sys.argv[1])
df2 = pd.read_csv(sys.argv[2])
df = | pd.concat([df1, df2], ignore_index=True) | pandas.concat |
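# The comment above says the merged result is saved, but the save step is not shown in this
# snippet; a minimal sketch, assuming the output path is given as a (hypothetical) third
# command-line argument:
df.to_csv(sys.argv[3], index=False)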
import pandas as pd
import numpy as np
# personal csv reader module
import reader
def count_number(array, number):
"""
Counts the occurrence of number in array.
"""
count = 0
for entry in array:
if entry == number:
count += 1
return count
def count_numbers(array, numbers):
"""
Counts the occurrence of each number in array.
"""
count = []
for number in numbers:
count.append(count_number(array, number))
return count
# filename prefixes
FILE_PREFIX_XGB = "../output/xgb/task2_xgb_nativ_["
FILE_PREFIX_MLP = "../output/mlp_lbgfs/task2_mlp_lbgfs_["
# filename suffix
FILE_SUFFIX = "].csv"
# last file index e.g. [5]
INDEX_LAST_FILE = 6
# read training file
files = []
# read all existing files
for i in range(1, INDEX_LAST_FILE + 1):
files.append(reader.read_csv(FILE_PREFIX_MLP + str(i) + FILE_SUFFIX, False))
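# NOTE: only the MLP prediction files are read here; FILE_PREFIX_XGB is defined above but unused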
rows = []
# create a list for all values of the same row
for i in range(0, 3000):
row = []
for file in files:
row.append(file.iloc[i][1])
rows.append(row)
average_values = []
for i in range(0, 3000):
count = count_numbers(rows[i], [0, 1, 2])
# check which value has the highest count
if count[0] > count[1]:
if count[0] > count[2]:
average_values.append(0)
else:
average_values.append(2)
elif count[0] > count[2]:
average_values.append(1)
else:
if count[1] > count[2]:
average_values.append(1)
else:
average_values.append(2)
# print each row including the average value
print("Row-Number:", i+2000,"0s:", count[0], "\t1s:", count[1], "\t2s:", count[2], "\t-overfit-value:", average_values[i])
# preparing to write the coefficients to file
out = {"Id" : files[0]['Id'], "y": average_values}
# output data frame
out = | pd.DataFrame(data=out, dtype=np.int16) | pandas.DataFrame |
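# A minimal sketch of the remaining write step hinted at by the comments above, assuming an
# output filename that is not specified in the original snippet:
# out.to_csv("task2_ensemble_overfit.csv", index=False)
#
# The hand-rolled counting above amounts to a per-row majority vote; a more compact (roughly
# equivalent, though ties may be broken differently) formulation would be:
# average_values = [int(np.bincount(row, minlength=3).argmax()) for row in rows]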
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
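# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): the NaT slots of a DatetimeIndex drop out of ordering
# comparisons exactly like NaN slots of a float Index -- both come back False.
def _sketch_nat_mirrors_nan():
    fidx = pd.Index([1.0, np.nan, 3.0])
    didx = DatetimeIndex(["2014-01-01", NaT, "2014-03-01"])
    assert (fidx < 2.0).tolist() == [True, False, False]
    assert (didx < Timestamp("2014-02-01")).tolist() == [True, False, False]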
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
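# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): ordering comparisons between tz-naive and tz-aware
# datetimes refuse to guess a timezone and raise, which is what the test
# above asserts for every operator other than eq/ne.
def _sketch_mixed_tzawareness_ordering_raises():
    naive = date_range("2016-01-01", periods=3)
    aware = naive.tz_localize("US/Pacific")
    try:
        naive < aware  # evaluated only for the exception it raises
    except TypeError:
        pass  # expected: mixed tz-awareness ordering is rejected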
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
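# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): a string on the right-hand side is parsed to a Timestamp
# before comparing, so only the first element matches here.
def _sketch_dti_vs_string():
    rng = date_range("1/1/2000", periods=3)
    assert (rng == "1/1/2000").tolist() == [True, False, False]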
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
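# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): datetime64 and timedelta64 values never compare equal, and
# ordering across the two dtypes raises instead of comparing raw integers.
def _sketch_dt64_vs_td64():
    dti = date_range("2000-01-01", periods=3, tz="Asia/Tokyo")
    tdi = pd.timedelta_range("1D", periods=3)
    assert not (dti == tdi).any()
    try:
        dti < tdi  # evaluated only for the exception it raises
    except TypeError:
        pass  # expected: cross-dtype ordering is invalid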
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
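# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): a np.timedelta64("NaT") scalar propagates, turning every
# element of the result into NaT.
def _sketch_add_td64_nat():
    dti = date_range("1994-04-01", periods=3, freq="QS")
    result = dti + np.timedelta64("NaT")
    assert result.isna().all()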
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
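# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): a day-resolution np.datetime64 scalar is upcast to
# nanoseconds before the subtraction, so the result is a plain timedelta64[ns]
# difference.
def _sketch_sub_non_nano_dt64():
    dti = date_range("20130101", periods=3)
    result = dti - np.datetime64("2013-01-01")
    assert (result == pd.timedelta_range("0 Days", periods=3)).all()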
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
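# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): subtracting NaT from a datetime64 array yields
# timedelta64[ns] NaT, not datetime64 NaT.
def _sketch_sub_nat_dtype():
    dti = DatetimeIndex([NaT, Timestamp("19900315")])
    result = dti - NaT
    assert result.dtype == "timedelta64[ns]"
    assert result.isna().all()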
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
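# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): for fixed-size ("tick") shifts, pd.offsets.Hour,
# np.timedelta64 and datetime.timedelta are interchangeable, which is what the
# loop above relies on.
def _sketch_tick_offset_equivalence():
    dates = date_range("2010-11-01 00:00", periods=3, tz="US/Pacific", freq="H")
    shifted = dates + pd.offsets.Hour(5)
    assert (shifted == dates + np.timedelta64(5, "h")).all()
    assert (shifted == dates + timedelta(hours=5)).all()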
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
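# Illustrative sketch added for this edit (not from the original suite; helper
# name is made up): DateOffset keyword arguments follow dateutil relativedelta
# semantics, so month arithmetic clamps to month-end before the day component
# is applied.
def _sketch_dateoffset_relativedelta():
    ts = Timestamp("2000-01-31 00:23:00")
    # Jan 31 + 5 months clamps to Jun 30, then +3 days lands on Jul 3.
    assert ts + DateOffset(months=5, days=3) == Timestamp("2000-07-03 00:23:00")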
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),