prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
"""see https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/"""
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from matplotlib import pyplot
from pandas import read_csv
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = | concat(cols, axis=1) | pandas.concat |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import gridspec
from fuzzywuzzy import process
from matplotlib.backends.backend_pdf import PdfPages
from itertools import product
def pie_chart_from_well(well_fld_mean, ax=None, well_id=None, plot_legend=False, cmap=None, norm=None):
"""Basic function for Cycif QC. Makes a radar plot where each brach represents an column in the input data.
Different columns are shown in different colors. Metadata must have field, plate and well information.
Paramerters
--------
ax : None or matplotlib.Axes
Current ax to plot on.
well_id : None or str
Name of the well to be used as figure title, if None, no title are drawn.
plot_legend : bool
Whether or not to plot legend, should be turned off if used in a wrapper.
cmap : matplotlib.colors.Colormap
cmap object for color mapping.
norm : matplotlib.colors
color range mapping.
"""
if cmap is None:
cmap = sns.light_palette("green", as_cmap=True)
if isinstance(well_fld_mean, pd.DataFrame):
well_fld_mean = well_fld_mean.iloc[:, 0]
if norm is None:
norm = matplotlib.colors.Normalize(vmin=0, vmax=15, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=cmap)
colors = [mapper.to_rgba(x) for x in well_fld_mean.values]
ax = ax or plt.gca()
if ax is None:
ax = plt.subplot(111, polar=True)
well_fld_mean.fillna(0, inplace=True)
ax.pie([1] * well_fld_mean.shape[0],
colors=colors, labels=well_fld_mean.index)
ax.set_title(well_id, loc='left', fontdict={'fontsize': 14}, pad=2)
def get_fld_mean(plate_meta, expr_data, well_col='Well', fld_col='fld'):
"""Calculate per field mean of each field in each well. Assumes samples
are from the same plate.
Parameters
--------
plate_meta : pandas.DataFrame
a table of metadata, rows as cell names and columns as metadata.
expr_data : pandas.DataFrame
a table of expression data, rows as cell names and columns as
channel/markers.
Returns
--------
fld_mean : pandas.DataFrame
a table of per field per well mean intensity of each channel.
"""
expr_data = expr_data.reindex(plate_meta.index).copy()
expr_data[well_col] = plate_meta[well_col]
expr_data[fld_col] = plate_meta[fld_col]
fld_mean = expr_data.groupby([well_col, fld_col]).mean()
return fld_mean
def process_channel_info(channel_info, expr_data):
"""Static function for processing channel info.
"""
channel_info = channel_info[channel_info.Channel != 'DAPI'].copy()
channel_info.sort_values(['Cycle', 'Channel'], inplace=True)
channel_info.index = np.arange(channel_info.shape[0])
choices = channel_info.Marker.values
for col in expr_data.columns:
if 'DNA' in col:
marker = 'Hoechst'
cycle = 1
elif ' (alpha-isoform)' in col:
marker = process.extract(col.replace(
' (alpha-isoform)', 'alpha'), choices, limit=1)[0][0]
elif 'bckgrnd.1' in col:
marker = 'cy3 bckgrnd'
elif 'bckgrnd.2' in col:
marker = 'cy5 bckgrnd'
elif 'bckgrnd.3' in col:
marker = 'FITC bckgrnd'
else:
marker = process.extract(col, choices, limit=1)[0][0]
marker_idx = channel_info[channel_info.Marker == marker].index
channel_info.loc[marker_idx, 'expr_col'] = col
channel_info_Nuclei = channel_info.copy()
channel_info_Nuclei.expr_col = channel_info_Nuclei.expr_col.apply(
lambda x: x.replace('Cytoplasm', 'Nuclei'))
channel_info = channel_info.append(channel_info_Nuclei)
channel_info.index = np.arange(channel_info.shape[0])
channel_info = channel_info[channel_info.Marker != 'empty']
return channel_info
def plate_QC_pdf(plate_fld_mean, channel_info, figname):
"""Generate a multipage pdf file for cells on each cycif plate.
Each page is organized in a 3 X 3 grid where rows represent cycles
and columns represent different channels.
Parameters
--------
plate_fld_mean : pandas.DataFrame
a table of per well per field intensity mean of each channel/marker.
channel_info : pandas.DataFrame
a table of channel/marker metadata including cycle the marker is measured,
Must have columns named 'Cycle', 'Channel', 'Marker', and 'expr_col'.
This table should be already sorted by ['Cycle', 'Channel'] and indexed.
column name of the marker in the 'plate_fld_mean' DataFrame, dye used. These
combined determined the location on the pdf file of the particular marker.
figname : str
figure name to be used.
"""
pdf = PdfPages(figname)
marker_orders = channel_info.index.values
k = 0
wells = []
for row, col in product(['C', 'D', 'E'], np.arange(4, 11)):
wells.append(row + str(col).zfill(2))
# splitting the channel/markers into pages. Each row in the `channel_info' dataframe
# will be shown as a subplot, and thus for every 9th row, a new page will
# be created.
while k * 9 < marker_orders.max():
# get data for the new page.
new_page_channel_info = channel_info[
(marker_orders < 9 * (k + 1)) & (marker_orders >= 9 * k)]
# adjusting the index to fit in the 3x3 indexing system
new_page_channel_info.index = [
x - 9 * k for x in new_page_channel_info.index]
# using a outer grid of 3x3 and inner grid of 3X7 for each page
fig = plt.figure(figsize=(32, 16))
outer = gridspec.GridSpec(3, 3, wspace=0.1, hspace=0.1)
all_flds = ['fld' + str(x) for x in range(1, 10)]
# iterate through each row. Again, each row will be represented as a
# subplot.
for i in range(9):
try:
col_name = new_page_channel_info.loc[i, 'expr_col']
except KeyError:
continue
fld_mean_col = plate_fld_mean[col_name]
cycle = new_page_channel_info.loc[i, 'Cycle']
# setting colors
if new_page_channel_info.loc[i, 'Channel'] == 'FITC':
cmap = sns.light_palette("green", as_cmap=True)
elif new_page_channel_info.loc[i, 'Channel'] == 'cy3':
cmap = sns.light_palette("red", as_cmap=True)
elif new_page_channel_info.loc[i, 'Channel'] == 'cy5':
cmap = sns.light_palette("purple", as_cmap=True)
# color values are normlized to the plate-wise max of each
# chennel/marker.
color_scale = matplotlib.colors.Normalize(
vmin=0, vmax=fld_mean_col.max(), clip=True)
# write figure annotations including channel name and cycles.
# Channel names
col_title_x = 0.24 + int(i % 3) * 0.27
col_title_y = 0.9 - (cycle % 3 * 0.262)
fig.text(col_title_x, col_title_y, col_name,
horizontalalignment='center', fontsize='x-large')
# Cycles
if i % 3 == 0:
fig.text(0.12, 0.78 - cycle % 3 * 0.26, 'Cycle ' + str(cycle),
rotation='vertical', horizontalalignment='center', fontsize='x-large')
# plotting inner grids. Organized by wells. Rows as well C, D, E and columns from
# X04 to X10.
inner = gridspec.GridSpecFromSubplotSpec(3, 7,
subplot_spec=outer[i],
wspace=0.02,
hspace=0.02)
available_wells = plate_fld_mean.index.levels[0]
for j in range(21):
# subsetting data for the current well to be plotted.
current_well = wells[j]
if current_well not in available_wells:
continue
fld_mean_well = fld_mean_col.loc[current_well]
missing_flds = [
x for x in all_flds if x not in fld_mean_well.index]
for missing_fld in missing_flds:
fld_mean_well[missing_fld] = 0
fld_mean_well.index = [
x.replace('fld', '') for x in fld_mean_well.index]
fld_mean_well.sort_index(inplace=True)
# Add subplot using the pie_chart_from_well.
ax = plt.Subplot(fig, inner[j])
pie_chart_from_well(
fld_mean_well, ax=ax, cmap=cmap, norm=color_scale, well_id=current_well)
fig.add_subplot(ax)
pdf.savefig(fig)
plt.close()
k += 1
pdf.close()
"""
=== Functions below are not used currently.
"""
def bar_chart_from_well(fld_mean, ax=None, well_id=None, plot_legend=False, cmap=None):
"""Basic function for Cycif QC. Makes a radar plot where each brach represents an column in the input data.
Different columns are shown in different colors. Metadata must have field, plate and well information.
Paramerters
--------
ax : None or matplotlib.Axes
Current ax to plot on.
well_id : None or str
Name of the well to be used as figure title, if None, no title are drawn.
plot_legend : bool
Whether or not to plot legend, should be turned off if used in a wrapper.
"""
fld_mean = fld_mean.iloc[:, 0]
if cmap is None:
cmap = sns.color_palette("Set1", n_colors=9)
cmap = pd.Series(
cmap, index=['fld' + str(x) for x in range(1, 10)])
ax = ax or plt.gca()
if ax is None:
ax = plt.subplot(111, polar=True)
fld_mean.fillna(0, inplace=True)
for idx in fld_mean.index:
ax.bar(idx, fld_mean[idx], color=cmap[idx], label=idx)
ax.set_title(well_id, loc='left', fontdict={'fontsize': 14}, pad=10)
def QC_plot_all(metadata, expr, combine_output_fn=None, plate_col='Plate', well_col='Well', replicate_col=None, **kwargs):
"""Overall wrapper function for experiment-wise QC. Makes field-wise of mean intensity plot for each well in each plate.
Metadata must have field, plate and well information.
Paramerters
--------
metadata : pandas.DataFrame
Table of Cycif data metadata. Must have matching indicies to expr.
expr : pandas.DataFrame or pandas.Series.
Table of cycif expression data. Rows as cells and columns as channels. Best log normalized.
Can be used to visualize a single channel if expr is passed as a Series.
combine_output_fn : None or str
Name of pdf file that all figures are combined into. If let None, each plate will be represented as one figure.
"""
if combine_output_fn is not None:
pdf = PdfPages(combine_output_fn)
for plate in metadata.groupby(plate_col):
plate_id, plate_meta = plate
plate_meta = plate_meta.sort_values(well_col)
if combine_output_fn is not None:
fig_title = ' '.join([expr.name, 'plate', str(plate_id)])
fig = QC_plot_from_plate(
plate_meta, expr, well_col=well_col, fig_title=fig_title, **kwargs)
pdf.savefig(fig)
plt.close()
else:
if replicate_col is None:
figname = 'Plate {}.png'.format(plate_id)
else:
figname = 'Replicate {} Plate {}.png'.format(
plate_meta[replicate_col][0], plate_id)
QC_plot_from_plate(plate_meta, expr, fig_name=figname,
well_col=well_col, fig_title=None, **kwargs)
if combine_output_fn is not None:
pdf.close()
def QC_plot_from_plate(plate_meta, expr, plot_fun='pie',
fig_name=None, well_col='Well', field_col='fld', size=5, fig_title=None):
"""Wrapper function for plate-wise QC. Makes field-wise of mean intensity plot for each well.
Metadata must have field, plate and well information.
Paramerters
--------
plate_meta : pandas.DataFrame
Table of Cycif data metadata of a ceritain plate. Must have matching indicies to expr.
expr : pandas.DataFrame or pandas.Series.
Table of cycif expression data. Rows as cells and columns as channels. Best log normalized.
Can be used to visualize a single channel if expr is passed as a Series.
(well,field)_col : str
Column names for well and field metadata in 'plate_meta'
fig_name : None or str
Name of output file, if None, show figure in console.
size : numeric
Size factor of each subplots.
Returns
--------
fig : matplotlib.figure
Figure object to be used in pdf outputs.
"""
# define plotting functions.
subplot_kw = None
if plot_fun == 'pie':
plot_fun = pie_chart_from_well
elif plot_fun == 'rader':
plot_fun = radar_plot_from_well
subplot_kw = dict(polar=True)
elif plot_fun == 'bar':
plot_fun = bar_chart_from_well
plot_cmap = 'Blues'
expr = pd.DataFrame(expr)
expr_cols = expr.columns
plate_meta = plate_meta.merge(
expr, left_index=True, right_index=True, how='left', sort=False)
plate_meta = plate_meta.sort_values([well_col, field_col])
num_wells = plate_meta[well_col].unique().shape[0]
ncols = 7
nrows = int(np.ceil(num_wells / ncols))
plate_df_fld_mean = plate_meta.groupby(['Well', 'fld'], sort=False).mean()
plate_df_fld_mean.reset_index(inplace=True)
fld_mean_max = plate_df_fld_mean.iloc[:, -1].max()
color_scale = matplotlib.colors.Normalize(
vmin=0, vmax=fld_mean_max, clip=True)
# get all field names.
all_unique_fileds = sorted(plate_df_fld_mean[field_col].unique())
fig, axes = plt.subplots(nrows, ncols,
subplot_kw=subplot_kw,
figsize=(ncols * 4, nrows * 4),
sharex=True,
sharey=True)
axes = axes.ravel()
idx = 0
for well in plate_df_fld_mean.groupby(well_col):
well_id, well_df = well
fld_mean = | pd.DataFrame(0, index=all_unique_fileds, columns=expr_cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 14:24:56 2021
@author: <NAME>
International connected electricity sector
- NO3 connected to DK1
- SE3 connected to DK1
- SE4 connected to DK2
- DE connected to DK1
- DE connected to DK2
- NL connected to DK1
- Possible to add CO2 constraint
- To remove a country from simulation -> comment out the section. Be
aware of plots.
Reads data for the period 2017 dowloaded from
data.open-power-system-data.org
Capacity factor is determined using installed capacity per production type
data from www.transparency.entsoe.eu
"""
#%% Import and define
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from pandas.tseries.offsets import DateOffset
def annuity(n,r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
# Create network and snapshot
network = pypsa.Network()
hours_in_2017 = pd.date_range('2017-01-01T00:00Z','2017-12-31T23:00Z', freq='H')
network.set_snapshots(hours_in_2017)
# Load data: Demand and enerators for 6 regions
df_elec = pd.read_csv('data/2017_entsoe.csv', sep=',', index_col=0) # in MWh
df_elec.index = pd.to_datetime(df_elec.index) #change index to datatime
df_heat = | pd.read_csv('data/heat_demand.csv', sep=';', index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import wfdb
from wfdb import processing
import matplotlib.pyplot as plt
import seaborn as sns
import os
from wfdb.processing.hr import calc_rr
import json
# import pyhrv.time_domain as td
# import pyhrv.frequency_domain as fd
# import pyhrv.nonlinear as nl
from IPython.display import clear_output
import warnings
import pickle
import math
plt.ioff()
warnings.filterwarnings("ignore")
sns.set()
def Calculate_HRV(Patient_ID):
# record = wfdb.rdrecord('mitdb/mit-bih-arrhythmia-database-1.0.0/' + str(Patient_ID) + '', )
annotation = wfdb.rdann('./mitdb/mit-bih-arrhythmia-database-1.0.0/' + str(Patient_ID) + '', 'atr',)
# print(len(annotation.sample))
# print(len(annotation.symbol))
Arr = annotation.symbol
# print(Arr[:10])
# print(len(Arr))
Table = | pd.DataFrame(columns=["RR Peaks", "RR Intervals", "Symbols", "Mask", "Elements_To_Skip", "To_Skip_NN50"]) | pandas.DataFrame |
"""Rules for conversion between SQL, pandas, and odbc data types."""
import pandas as pd
import pyodbc
rules = pd.DataFrame.from_records(
[
{
"sql_type": "bit",
"sql_category": "boolean",
"min_value": False,
"max_value": True,
"pandas_type": "boolean",
"odbc_type": pyodbc.SQL_BIT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "tinyint",
"sql_category": "exact numeric",
"min_value": 0,
"max_value": 255,
"pandas_type": "UInt8",
"odbc_type": pyodbc.SQL_TINYINT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "smallint",
"sql_category": "exact numeric",
"min_value": -(2 ** 15),
"max_value": 2 ** 15 - 1,
"pandas_type": "Int16",
"odbc_type": pyodbc.SQL_SMALLINT,
"odbc_size": 2,
"odbc_precision": 0,
},
{
"sql_type": "int",
"sql_category": "exact numeric",
"min_value": -(2 ** 31),
"max_value": 2 ** 31 - 1,
"pandas_type": "Int32",
"odbc_type": pyodbc.SQL_INTEGER,
"odbc_size": 4,
"odbc_precision": 0,
},
{
"sql_type": "bigint",
"sql_category": "exact numeric",
"min_value": -(2 ** 63),
"max_value": 2 ** 63 - 1,
"pandas_type": "Int64",
"odbc_type": pyodbc.SQL_BIGINT,
"odbc_size": 8,
"odbc_precision": 0,
},
{
"sql_type": "float",
"sql_category": "approximate numeric",
"min_value": -(1.79 ** 308),
"max_value": 1.79 ** 308,
"pandas_type": "float64",
"odbc_type": pyodbc.SQL_FLOAT,
"odbc_size": 8,
"odbc_precision": 53,
},
{
"sql_type": "time",
"sql_category": "date time",
"min_value": pd.Timedelta("00:00:00.0000000"),
"max_value": | pd.Timedelta("23:59:59.9999999") | pandas.Timedelta |
import numpy as np
def loo_mean_enc(df, col, col_target, col_split='split',
split_train_flag='Train', split_test_flag='Test',
mult_noise=np.random.normal):
""" Leave-one-out mean encoding of categorical features as described by <NAME> in his slideshow
http://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions
:param df: dataframe containing the categorical column and target column
:param col: name of categorical column to be encoded
:param col_target: name of target column to be used for means
:param col_split: name of column that distinguishes train from test set
:param split_train_flag: flag value in `col_split` that denotes training row
:param split_test_flag: flag value in `col_split` that denotes testing row
:param mult_noise: multiplicative noise generator. If `None`, no noise is multiplied
:return:
"""
if mult_noise is None:
mult_noise = lambda size: 1
df_tmp = | pd.DataFrame(index=df.index) | pandas.DataFrame |
import collections
import itertools
import math
import random
import string
from abc import ABC, abstractmethod
from typing import List, Tuple, Any, Dict, Type
import pandas as pd
import numpy as np
def biased_coin(p):
return np.random.choice([1, 0], p=[p, 1 - p])
def RandStr(alphabet=string.ascii_letters, min_len=1, max_len=8):
yield ''.join(random.choice(alphabet) for i in range(random.randint(min_len, max_len)))
def RandSensibleString(min_len=1, max_len=8, seps=[0]):
alphabet = string.ascii_letters
num_seps = len(seps)
res_str = ""
sep = "_"
tmps = seps + [0]
for i in range(0, num_seps + 1):
if i > 0:
res_str += sep
seg_len = random.randint(min_len, (max_len + min_len) // 2)
alphabet = string.ascii_letters if not tmps[i] else ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
res_str += "".join(random.choice(alphabet) for j in range(seg_len))
return res_str
class ColGen(ABC):
@abstractmethod
def generate(self, num_rows: int, col_id: str = "col") -> Tuple[str, Dict[int, Any]]:
pass
class BasicColGen(ColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None,
all_equal_prob=0.3):
self.all_distinct = all_distinct
self.num_distinct = num_distinct
self.duplicates = duplicates
self.all_equal_prob = all_equal_prob
def generate(self, num_rows: int, col_id: str = "col",
past_cols: Dict[Type, List] = None, feeding_prob: float = 0) -> Tuple[str, Dict[int, Any]]:
if self.all_distinct is None and self.duplicates is None:
all_distinct = random.choice([True, False])
duplicates = random.choice([True, False])
elif self.all_distinct is None:
duplicates = self.duplicates
if not duplicates:
all_distinct = random.choice([True, False])
else:
all_distinct = False
elif self.duplicates is None:
all_distinct = self.all_distinct
duplicates = random.choice([True, False])
else:
all_distinct = False
duplicates = False
if all_distinct:
pool = set()
while len(pool) < num_rows:
pool.add(self.get_value_wrapper(past_cols, feeding_prob))
vals = list(pool)
elif duplicates:
if self.num_distinct == -1 and biased_coin(self.all_equal_prob) == 1:
vals = [self.get_value_wrapper(past_cols, feeding_prob)] * num_rows
else:
pool = set()
while len(pool) < max(self.num_distinct, (num_rows - 1), 1):
pool.add(self.get_value_wrapper(past_cols, feeding_prob))
pool = list(pool)
if self.num_distinct != -1:
vals = random.sample(pool, min(max(0, self.num_distinct), len(pool) - 1))
try:
vals.append(random.choice(vals))
except:
print(pool, vals, num_rows - 1)
raise
else:
vals = []
while len(vals) < num_rows:
vals.append(random.choice(pool))
else:
# Anything goes
vals = []
for i in range(num_rows):
vals.append(self.get_value_wrapper(past_cols, feeding_prob))
ret_col = {}
random.shuffle(vals)
for i, v in enumerate(vals[:num_rows]):
ret_col[i] = v
return col_id + self.get_suffix(), ret_col
def get_value_wrapper(self, past_cols: Dict[Type, List], feeding_prob: float):
cur_type = type(self)
if past_cols is not None and len(past_cols.get(cur_type, [])) > 0:
if biased_coin(feeding_prob) == 1:
# Feed from past columns
chosen_col = random.choice(past_cols[cur_type])
return random.choice(chosen_col)
return self.get_value()
def get_value(self) -> Any:
raise NotImplementedError
def get_suffix(self) -> str:
return ""
class StrColGen(BasicColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None,
alphabet: str = None, min_len: int = 1, max_len: int = 15):
super().__init__(all_distinct, num_distinct, duplicates)
if alphabet is None:
self.alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-,:;.!?&()"
else:
self.alphabet = alphabet
self.min_len = min_len
self.max_len = max_len
def get_value(self) -> Any:
ret = ""
for i in range(random.randint(self.min_len, self.max_len)):
ret += random.choice(self.alphabet)
return ret
def get_suffix(self) -> str:
suffix = "_str"
if self.all_distinct:
suffix += "_d"
if self.duplicates:
suffix += "_dup"
return suffix
class SensibleStrColGen(BasicColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None,
alphabet: str = None, min_len: int = 1, max_len: int = 15):
super().__init__(all_distinct, num_distinct, duplicates)
self.seps = [random.choice([0, 0, 1]) for i in range(0, random.choice([0, 0, 0, 1, 1, 2]))]
self.min_len = min_len
self.max_len = max_len
def get_value(self) -> Any:
return RandSensibleString(self.min_len, self.max_len, self.seps)
def get_suffix(self) -> str:
suffix = "_sens_str"
if self.all_distinct:
suffix += "_d"
if self.duplicates:
suffix += "_dup"
return suffix
class IntColGen(BasicColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None,
min_val: int = -1000, max_val: int = 1000):
super().__init__(all_distinct, num_distinct, duplicates)
self.min_val = min_val
self.max_val = max_val
def get_value(self) -> Any:
return random.randint(self.min_val, self.max_val) # inclusive
def get_suffix(self) -> str:
suffix = "_int"
if self.all_distinct:
suffix += "_d"
if self.duplicates:
suffix += "_dup"
return suffix
class FloatColGen(BasicColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None,
min_val: int = -1000, max_val: int = 1000, nan_prob: float = 0.05):
super().__init__(all_distinct, num_distinct, duplicates)
self.min_val = min_val
self.max_val = max_val
self.nan_prob = nan_prob
def get_value(self) -> Any:
if self.nan_prob is not None:
if np.random.choice([0, 1], p=[self.nan_prob, 1 - self.nan_prob]) == 0:
return np.nan
return random.uniform(self.min_val, self.max_val)
def get_suffix(self) -> str:
suffix = "_float"
if self.all_distinct:
suffix += "_d"
if self.duplicates:
suffix += "_dup"
return suffix
class BoolColGen(BasicColGen):
def __init__(self, all_distinct: bool = None, num_distinct: int = -1, duplicates: bool = None):
super().__init__(all_distinct, num_distinct, duplicates)
self.all_distinct = False
self.duplicates = False
def get_value(self) -> Any:
return random.choice([True, False])
def get_suffix(self) -> str:
suffix = "_bool"
if self.all_distinct:
suffix += "_d"
if self.duplicates:
suffix += "_dup"
return suffix
class RandDf:
def __init__(self, min_width: int = 1, min_height: int = 1,
max_width: int = 7, max_height: int = 7,
column_gens: List[ColGen] = None,
index_levels: int = None, column_levels: int = None,
max_index_levels: int = 3, max_column_levels: int = 3,
num_rows: int = None, num_columns: int = None,
int_col_prob=0.2, idx_mutation_prob=0.2,
multi_index_prob=0.2, col_prefix='',
col_feeding_prob=0.2, nan_prob=0.05):
self.min_width = min_width
self.min_height = min_height
self.max_width = max_width
self.max_height = max_height
self.index_levels = index_levels
self.column_levels = column_levels
self.max_index_levels = max_index_levels
self.max_column_levels = max_column_levels
self.num_rows = num_rows
self.num_columns = num_columns
self.int_col_prob = int_col_prob
self.idx_mutation_prob = idx_mutation_prob
self.multi_index_prob = multi_index_prob
self.col_prefix = col_prefix
self.col_feeding_prob = col_feeding_prob
if column_gens is None:
self.column_gens = [StrColGen(), SensibleStrColGen(), StrColGen(duplicates=True),
SensibleStrColGen(duplicates=True), FloatColGen(nan_prob=nan_prob)]
else:
self.column_gens = column_gens
def create_multi_index(self, index: pd.Index, num_levels: int, column_index=False) -> pd.MultiIndex:
num_rows = len(index)
vals = list(index)
level_gens: List[ColGen] = [StrColGen(max_len=8), IntColGen(), SensibleStrColGen(max_len=8),
StrColGen(max_len=8, duplicates=True), IntColGen(duplicates=True)]
levels = [vals]
for i in range(num_levels - 1):
col_gen = random.choice(level_gens)
col_id, col_vals = col_gen.generate(num_rows)
levels.append(col_vals.values())
return pd.MultiIndex.from_tuples(zip(*reversed(levels)))
def __iter__(self):
yield self.generate()
def generate(self) -> pd.DataFrame:
num_rows = random.randint(self.min_height, self.max_height) if self.num_rows is None else self.num_rows
num_cols = random.randint(self.min_width, self.max_width) if self.num_columns is None else self.num_columns
if np.random.choice([0, 1], p=[self.multi_index_prob, 1 - self.multi_index_prob]) == 0:
index_levels = random.randint(2, self.max_index_levels) if self.index_levels is None else self.index_levels
else:
index_levels = 1 if self.index_levels is None else self.index_levels
if np.random.choice([0, 1], p=[self.multi_index_prob, 1 - self.multi_index_prob]) == 0:
column_levels = random.randint(2,
self.max_column_levels) if self.column_levels is None else self.column_levels
else:
column_levels = 1 if self.column_levels is None else self.column_levels
df_dict = {}
past_cols: Dict[Type, List[Any]] = collections.defaultdict(list)
for i in range(num_cols):
col_gen: ColGen = random.choice(self.column_gens)
col_id, col_vals = col_gen.generate(num_rows, col_id="{}col{}".format(self.col_prefix, i),
past_cols=past_cols, feeding_prob=self.col_feeding_prob)
past_cols[type(col_gen)].append(col_vals)
df_dict[col_id] = col_vals
df = | pd.DataFrame(df_dict) | pandas.DataFrame |
import pandas as pd
import requests
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from requests.adapters import HTTPAdapter
from urllib3 import Retry
import json
"""
Read google sheet as dataframe
If you read with API, you need to use spreadsheetId and credentials (it is the json file in same notebook folder).
you also need to add the account email associated to the credentials as user shared in the google sheet.
"""
def read_gsheet(spreadsheetId, credentials = None, sheet = None,
valueRenderOption = 'FORMATTED_VALUE', dateTimeRenderOption = 'SERIAL_NUMBER'):
if credentials is None:
post_url = "https://api.askdata.com/smartnotebook/readGsheet"
body = {
"spreadsheetId": spreadsheetId,
"sheet": sheet,
"valueRenderOption": valueRenderOption,
"dateTimeRenderOption": dateTimeRenderOption
}
s = requests.Session()
s.keep_alive = False
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
headers = {
"Content-Type": "application/json"
}
r = s.post(url=post_url, json=body, headers=headers)
r.raise_for_status()
df = pd.DataFrame(json.loads(r.json()))
else:
scope = ['https://www.googleapis.com/auth/spreadsheets.readonly']
cred = ServiceAccountCredentials.from_json_keyfile_dict(credentials, scope)
service = build('sheets', 'v4', credentials=cred)
sheet_service = service.spreadsheets()
range = 'A1:WWW500000'
if sheet is not None:
range = '%s!%s'%(sheet, range)
result_input = sheet_service.values().get(spreadsheetId = spreadsheetId,
range = range,
valueRenderOption = valueRenderOption,
dateTimeRenderOption = dateTimeRenderOption).execute()
values_input = result_input.get('values', [])
df = pd.DataFrame(values_input[1:], columns=values_input[0])
return df
"""
Read hubspot contacts as dataframe
Usage example: read_hubspot_contacts(api_key)
"""
def read_hubspot_contacts(api_key, offset=100):
from askdata.integrations import hubspot
return hubspot.get_contacts_df(api_key, offset)
"""
Read alpha vantage api
Usage example: read_alphavantage_stock(api_key, symbols)
"""
def read_alphavantage_stock(symbols, api_key):
from askdata.integrations import alphavantage
return alphavantage.get_daily_adjusted_df(symbols, api_key)
def normalize_columns(df: pd.DataFrame):
problematicChars = [",", ";", ":", "{", "}", "(", ")", "=", ">", "<", ".", "!", "?"]
new_cols = {}
for column in df.columns:
columnName = column.lower()
for p_char in problematicChars:
columnName = columnName.replace(p_char, "")
columnName = columnName.replace(" ", "_")
columnName = columnName.replace("-", "_")
columnName = columnName.replace("/", "_")
columnName = columnName.replace("\\", "_")
columnName = columnName.replace("%", "perc")
columnName = columnName.replace("+", "plus")
columnName = columnName.replace("&", "and")
columnName = columnName.replace("cross", "cross_pass")
columnName = columnName.replace("authorization", "authoriz")
columnName = columnName.strip()
for p_char in problematicChars:
columnName = columnName.replace(p_char, "")
new_cols[column] = columnName
return df.rename(columns=new_cols)
def read(type, settings):
if type == "CSV":
return __read_csv(settings)
if type == "EXCEL":
return __read_excel(settings)
if type == "PARQUET":
return __read_parquet(settings)
if type == "GOOGLE_SHEETS":
return __read_gsheet(settings)
if type == "Hubspot":
return __read_hubspot(settings)
else:
raise TypeError("Dataset type not supported yet")
def __read_csv(settings: dict):
# Handle thousands
if settings["thousands"] == "None":
settings["thousands"] = None
# Read source file
df = pd.read_csv(filepath_or_buffer=settings["path"], sep=settings["separator"], encoding=settings["encoding"], thousands=settings["thousands"])
# Detect if any column is a date-time
for col in df.columns:
if df[col].dtype == 'object':
try:
df[col] = pd.to_datetime(df[col])
except ValueError:
pass
# Exec custom post-processing
if "processing" in settings and settings["processing"] != "" and settings["processing"] != None:
exec(settings["processing"])
return df
def __read_parquet(settings: dict):
df = | pd.read_parquet(path=settings["path"]) | pandas.read_parquet |
#encoding=utf-8
import os
import sys
import json
import codecs
import copy
import get_q2q_sim
import pandas as pd
import click
@click.command()
@click.argument("path")
@click.argument("all_ques_path")
@click.option("--save_path", default=None, help="if none, not save")
@click.option("--is_json", type=bool, is_flag=True, help="if path is json file")
def describ(path, all_ques_path, is_json, save_path=None):
all_ques = set()
with codecs.open(all_ques_path, "r", "utf-8") as f:
ques = [v.strip() for v in f.readlines()]
ques = set(ques)
print("all ques nums: {}".format(len(ques)))
with codecs.open(path, "r", "utf-8") as f:
if is_json:
data = json.load(f)
else:
data = []
while True:
s = f.readline().strip()
if not s:
break
t = f.readline().strip()
f.readline()
s = s.replace("SEQUENCE_END", "")
t = t.replace("SEQUENCE_END", "")
s, t = s.strip(), t.strip()
data.append({"source":s,"predict":t})
score_list = []
new_data = []
gen_nums = 0
for i, t in enumerate(data):
if "score" in t:
score_list.append(t["score"])
xt = copy.copy(t)
if t["predict"] == t["source"]:
xt["in_ques_set"] = True
elif t["predict"] not in ques:
xt["in_ques_set"] = False
gen_nums += 1
else:
xt["in_ques_set"] = True
new_data.append(xt)
series_score = | pd.Series(score_list) | pandas.Series |
import numpy as np
import pdb
import gzip
import matplotlib
import matplotlib.pyplot as plt
import cPickle as pkl
import operator
import scipy.io as sio
import os.path
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
np.random.seed(23254)
def parse(path):
g = gzip.open(path, 'r')
for l in g:
yield eval(l)
def getuserCache(df):
userCache = {}
for uid in sorted(df.uid.unique().tolist()):
items = sorted(df.loc[df.uid == uid]['iid'].values.tolist())
userCache[uid] = items
return userCache
def getitemCache(df):
itemCache = {}
for iid in sorted(df.iid.unique().tolist()):
users = sorted(df.loc[df.iid == iid]['uid'].values.tolist())
itemCache[iid] = users
return itemCache
def readData(dataset):
totalFile = pd.read_csv('data/'+dataset+'/ratings.dat',sep="\t",usecols=[0,1],names=['uid','iid'],header=0)
total_uids = sorted(totalFile.uid.unique())
total_iids = sorted(totalFile.iid.unique())
trainFile = | pd.read_csv('data/'+dataset+'/LOOTrain.dat',sep="\t",usecols=[0,1],names=['uid','iid'],header=0) | pandas.read_csv |
"""
Import as:
import im_v2.cryptodatadownload.data.client.cdd_client as imcdaclcd
"""
import logging
import os
from typing import Optional
import pandas as pd
import core.pandas_helpers as cpanh
import helpers.hdbg as hdbg
import helpers.hpandas as hpandas
import helpers.hs3 as hs3
import im_v2.common.data.client as icdc
_LOG = logging.getLogger(__name__)
# Latest historical data snapsot.
_LATEST_DATA_SNAPSHOT = "20210924"
class CddClient:
def __init__(
self,
data_type: str,
root_dir: str,
*,
aws_profile: Optional[str] = None,
remove_dups: bool = True,
resample_to_1_min: bool = True,
) -> None:
"""
Load CDD data.
:param: data_type: OHLCV or trade, bid/ask data
:param: root_dir: either a local root path (e.g., "/app/im") or
an S3 root path (e.g., "s3://alphamatic-data/data) to CDD data
:param: aws_profile: AWS profile name (e.g., "am")
:param remove_dups: whether to remove full duplicates or not
:param resample_to_1_min: whether to resample to 1 min or not
"""
self._root_dir = root_dir
self._aws_profile = aws_profile
self._remove_dups = remove_dups
self._resample_to_1_min = resample_to_1_min
self._s3fs = hs3.get_s3fs(self._aws_profile)
# Specify supported data types to load.
self._data_types = ["ohlcv"]
# Verify that requested data type is valid.
hdbg.dassert_in(
data_type.lower(),
self._data_types,
msg="Incorrect data type: '%s'. Acceptable types: '%s'"
% (data_type.lower(), self._data_types),
)
self._data_type = data_type
def read_data(
self,
full_symbol: str,
*,
data_snapshot: Optional[str] = None,
) -> pd.DataFrame:
"""
Load data from S3 and process it for use downstream.
:param full_symbol: `exchange::symbol`, e.g. `binance::BTC_USDT`
:param data_snapshot: snapshot of datetime when data was loaded, e.g. "20210924"
:return: processed CDD data
"""
data_snapshot = data_snapshot or _LATEST_DATA_SNAPSHOT
# Verify that requested data type is valid.
exchange_id, currency_pair = icdc.parse_full_symbol(full_symbol)
# Get absolute file path for a CDD file.
file_path = self._get_file_path(data_snapshot, exchange_id, currency_pair)
# Initialize kwargs dict for further CDD data reading.
# Add "skiprows" to kwargs in order to skip a row with the file name.
read_csv_kwargs = {"skiprows": 1}
#
if hs3.is_s3_path(file_path):
# Add s3fs argument to kwargs.
read_csv_kwargs["s3fs"] = self._s3fs
# Read raw CDD data.
_LOG.info(
"Reading CDD data for exchange id='%s', currencies='%s', from file='%s'...",
exchange_id,
currency_pair,
file_path,
)
data = cpanh.read_csv(file_path, **read_csv_kwargs)
# Apply transformation to raw data.
_LOG.info(
"Processing CDD data for exchange id='%s', currencies='%s'...",
exchange_id,
currency_pair,
)
transformed_data = self._transform(data, exchange_id, currency_pair)
return transformed_data
# TODO(Grisha): factor out common code from `CddClient._get_file_path` and
# `CcxtLoader._get_file_path`.
def _get_file_path(
self,
data_snapshot: str,
exchange_id: str,
currency_pair: str,
) -> str:
"""
Get the absolute path to a file with CDD data.
The file path is constructed in the following way:
`<root_dir>/cryptodatadownload/<snapshot>/<exchange_id>/<currency_pair>.csv.gz`.
:param data_snapshot: snapshot of datetime when data was loaded,
e.g. "20210924"
:param exchange_id: CDD exchange id, e.g. "binance"
:param currency_pair: currency pair `<currency1>_<currency2>`,
e.g. "BTC_USDT"
:return: absolute path to a file with CDD data
"""
# Get absolute file path.
file_name = currency_pair + ".csv.gz"
file_path = os.path.join(
self._root_dir,
"cryptodatadownload",
data_snapshot,
exchange_id,
file_name,
)
# TODO(Dan): Remove asserts below after CMTask108 is resolved.
# Verify that the file exists.
if hs3.is_s3_path(file_path):
hs3.dassert_s3_exists(file_path, self._s3fs)
else:
hdbg.dassert_file_exists(file_path)
return file_path
# TODO(*): Consider making `exchange_id` a class member.
# TODO(*): Replace currencies separator "/" to "_".
def _transform(
self,
data: pd.DataFrame,
exchange_id: str,
currency_pair: str,
) -> pd.DataFrame:
"""
Transform CDD data loaded from S3.
Input data example:
```
unix date symbol open high low close Volume ETH Volume USDT tradecount
1631145600000 2021-09-09 00:00:00 ETH/USDT 3499.01 3499.49 3496.17 3496.36 346.4812 1212024 719
1631145660000 2021-09-09 00:01:00 ETH/USDT 3496.36 3501.59 3495.69 3501.59 401.9576 1406241 702
1631145720000 2021-09-09 00:02:00 ETH/USDT 3501.59 3513.10 3499.89 3513.09 579.5656 2032108 1118
```
Output data example:
```
timestamp open high low close volume epoch currency_pair exchange_id
2021-09-08 20:00:00-04:00 3499.01 3499.49 3496.17 3496.36 346.4812 1631145600000 ETH/USDT binance
2021-09-08 20:01:00-04:00 3496.36 3501.59 3495.69 3501.59 401.9576 1631145660000 ETH/USDT binance
2021-09-08 20:02:00-04:00 3501.59 3513.10 3499.89 3513.09 579.5656 1631145720000 ETH/USDT binance
```
:param data: dataframe with CDD data from S3
:param exchange_id: CDD exchange id, e.g. "binance"
:param currency_pair: currency pair, e.g. "BTC_USDT"
:return: processed dataframe
"""
transformed_data = self._apply_common_transformation(
data, exchange_id, currency_pair
)
if self._data_type.lower() == "ohlcv":
transformed_data = self._apply_ohlcv_transformation(transformed_data)
else:
hdbg.dfatal(
"Incorrect data type: '%s'. Acceptable types: '%s'"
% (self._data_type.lower(), self._data_types)
)
return transformed_data
def _apply_common_transformation(
self, data: pd.DataFrame, exchange_id: str, currency_pair: str
) -> pd.DataFrame:
"""
Apply transform common to all CDD data.
This includes:
- Datetime format assertion
- Converting string dates to UTC `pd.Timestamp`
- Removing full duplicates
- Resampling to 1 minute using NaNs
- Name volume and currency pair columns properly
- Adding exchange_id and currency_pair columns
:param data: raw data from S3
:param exchange_id: CDD exchange id, e.g. "binance"
:param currency_pair: currency pair, e.g. "BTC_USDT"
:return: transformed CDD data
"""
# Verify that the Unix data is provided in ms.
hdbg.dassert_container_type(
data["unix"], container_type=None, elem_type=int
)
# Rename col with original Unix ms epoch.
data = data.rename({"unix": "epoch"}, axis=1)
# Transform Unix epoch into UTC timestamp.
data["timestamp"] = | pd.to_datetime(data["epoch"], unit="ms", utc=True) | pandas.to_datetime |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = | Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64) | pandas.Series |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import (
BytesIO,
StringIO,
)
import os
import platform
from urllib.error import URLError
import pytest
from pandas.errors import (
EmptyDataError,
ParserError,
)
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
# TODO(1.4) Please xfail individual tests at release time
# instead of skip
pytestmark = pytest.mark.usefixtures("pyarrow_skip")
@pytest.mark.network
@tm.network(
url=(
"https://raw.github.com/pandas-dev/pandas/main/"
"pandas/tests/io/parser/data/salaries.csv"
),
check_before_test=True,
)
def test_url(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
url = (
"https://raw.github.com/pandas-dev/pandas/main/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
| tm.assert_frame_equal(url_result, local_result) | pandas._testing.assert_frame_equal |
import numpy as _numpy
import math as _math
import pandas as _pd
import datetime as _datetime
import warnings
from apodeixi.util.a6i_error import ApodeixiError
from apodeixi.util.warning_utils import WarningUtils
from apodeixi.util.list_utils import ListMerger
class DataFrameUtils():
def __init__(self):
return
def _numpy_2_float(self, x):
'''
Cleans problems with numbers in the trees being built. Turns out that if they are numpy classes then the
YAML produced is unreadable and sometimes won't load. So move anything numpy to floats. If there are no decimals, returns an int
'''
if type(x)==_numpy.int32 or type(x)==_numpy.int64:
return int(x)
elif type(x)==_numpy.float32 or type(x)==_numpy.float64 or type(x)== float:
y = float(x)
if not _math.isnan(x) and int(x) - y == 0: # this is really an int
return int(x)
else:
return y # This is really a float
else:
return x
def clean(self, x):
'''
Addresses a number of problems with cell values returned by Pandas when which are not formattable outside Pandas
in a nice way. Things like: nan, NaT, dates, numpy classes, ...
So it returns a "cleaned up" version of x, safe to use in text messages or as values in a YAML file.
It preserves the "natural type" of x - numbers become int or float, dates become datetime, strings remain strings,
and "bad stuff" (nans, NaT, etc) become an empty string.
If there is nothing to clean, just return x
'''
# Clean up numerical stuff, if any
y = self._numpy_2_float(x)
if type(y)==float and _math.isnan(y):
y = ''
# Clean up NaT stuff, if any
if type(y) == type(_pd.NaT):
y = ''
# Clean up dates, if any
if 'strftime' in dir(y):
y = y.strftime('%Y-%m-%d') # As required by YAML: ISO-8601 simple date format
y = _datetime.datetime.strptime(y, '%Y-%m-%d') # Now parse into a datetime
# Tidy up strings, if needed - remove new lines, trailing or leading spaces
'''
if type(y)==str:
y = y.replace('\n', '').strip(' ')
'''
return y
def safely_drop_duplicates(self, parent_trace, df):
'''
Implements dropping of duplicates for DataFrame `df` in a more robust manner than Pandas' default implementation,
which fails if the cells in the DataFrame are not hashable.
For example, lists are not hashable. If the DataFrame contains lists then an attempt to drop duplicates
would produce an error "TypeError: unhashable type: 'list'".
In Apodeixi this happens often - for example, when manifests contain many-to-many mappings to other manifests
(e.g., the milestones kind has a list to reference big-rocks). So it is a problem to address since dropping of
duplicates happens in various use cases (such as when comparing a manifest to its previous version, by
creating 2 DataFrames and comparing interval-by-interval, dropping duplicates for each interval)'
This method remedies this problem by converting DataFrame contents to a string, dropping duplicates, and using
the index of the result to select in the original DataFrame, so the returned DataFrame's cells are the same object as
when initially provided (e.g., lists)
This implementation is inspired by https://newbedev.com/pandas-drop-duplicates-method-not-working
@param df A Pandas DataFrame
@return A DataFrame, obtained from the first by dropping duplicates
'''
try:
strings_df = df.astype(str)
no_duplicates_strings_df = strings_df.drop_duplicates()
no_duplicates_df = df.loc[no_duplicates_strings_df.index]
return no_duplicates_df
except Exception as ex:
raise ApodeixiError(parent_trace, "Encountered problem when dropping duplicates from a DataFrame",
data = {"type(error)": str(type(ex)), "error": str(ex)})
def safe_unique(self, parent_trace, df, column_name):
'''
More robust implementation than Pandas for obtaining a list of the unique values for a column in
a DataFrame.
In Pandas, one might typically do something like:
df[column_name].unique()
This has proven not robust enough in the Apodeixi code base because it can obscurely mask a defect
elsewhere in Apodeixi, with a cryptic message like:
'DataFrame' object has no attribute 'unique'
The problem arises because there might be "duplicates" (usually due to another defect in the code)
in the columns of DataFrame df. While technically speaking columns are "unique", the way Pandas
handles a "user bad practice" of putting multiple columns with the same name is to treat the column
index as based on objects, not strings. That allows effectively to have duplicates among the columns
of DataFrame df, like so:
UID | Area | UID-1 | Indicator | UID-2 | Sub-Indicator | UID-2 | UID-3 | Space
---------------------------------------------------------------------------------------------------------
A1 | Adopt | A1.I1 | throughput| | | A1.I1.S1 | | tests
A1 | Adopt | A1.I2 | latency | A1.I2.SU1 | interactive | | A1.I2.SU1.S1 | tests
The second occurrence of "UID-2" should have been merged into the "UID-3" column, but we once had an Apodeixi defect
that didn't, instead having two columns called "UID2". This is because Apodeixi was incorrectly using
"UID-n" if the UID had exactly n tokens, which is not a unique acronym path if some of the entities
are blank as in the example above, where the first row has no sub-indicator.
Upshot: the dataframe columns have "UID-2" duplicated, so an attempt to do
df["UID-2]
would produce a DataFrame, not a Series, so calling "unique()" on it would error out with a very cryptic
message:
'DataFrame' object has no attribute 'unique'
Instead, what this "more robust" method does is check if the column in question is not unique, and so it will
error out with hopefully a less criptic message.
If column is unique, it will return a list.
@param column_name A string, corresponding to the name of a column in the DataFrame
@param df A DataFrame. It is expected to have the `column_name` parameter as one of its columns.
'''
if type(column_name) != str:
raise ApodeixiError(parent_trace, "Can't get unique values for a DataFrame's column because column name provided is a '"
+ str(type(column_name)) + "' was provided instead of a string as expected")
if type(df) != _pd.DataFrame:
raise ApodeixiError(parent_trace, "Can't get unique values for column '" + str(column_name) + "' because a '"
+ str(type(df)) + "' was provided instead of a DataFrame as expected")
if len(column_name.strip()) ==0:
raise ApodeixiError(parent_trace, "Can't get unique values for a DataFrame's column because column name provided is blank")
columns = list(df.columns)
matches = [col for col in columns if col == column_name]
if len(matches) == 0:
raise ApodeixiError(parent_trace, "Can't get unique values in a DataFrame for column '" + str(column_name) + "' because it "
+ " is not one of the DataFrame's columns",
data = {"df columns": str(columns)})
elif len(matches) > 1:
raise ApodeixiError(parent_trace, "Can't get unique values in a DataFrame for column '" + str(column_name) + "' because it "
+ "appears multiple times as a column in the DataFrame",
data = {"df columns": str(columns)})
# All is good, so now it is safe to call the Pandas unique() function
return list(df[column_name].unique())
def replicate_dataframe(self, parent_trace, seed_df, categories_list):
'''
Creates and returns a DataFrame, by replicating the `seed_df` for each member of the `categories_list`,
and concatenating them horizonally.
The columns are also added a new top level, from `categories_list`.
A usecase where this is used is to create templates for product-related manifests where similar content
must exist per sub-product.
Example:
Suppose a product has subproducts ["Basic", "Premium], and this is provided as the `categories_list`.
Suppose the `seed_df` is some estimates about the product, such as:
bigRock FY 19 FY 20 FY 21
================================
0 None 150 150 150
1 None 100 100 100
2 None 0 0 0
3 None 45 45 45
4 None 0 0 0
5 None 300 300 300
6 None 140 140 140
Then this method would return the following DataFrame
Basic | Premium
bigRock FY 19 FY 20 FY 21 | bigRock FY 19 FY 20 FY 21
====================================================================
0 None 150 150 150 None 150 150 150
1 None 100 100 100 None 100 100 100
2 None 0 0 0 None 0 0 0
3 None 45 45 45 None 45 45 45
4 None 0 0 0 None 0 0 0
5 None 300 300 300 None 300 300 300
6 None 140 140 140 None 140 140 140
@param categories_list A list of hashable objects, such as strings or ints
'''
with warnings.catch_warnings(record=True) as w:
WarningUtils().turn_traceback_on(parent_trace, warnings_list=w)
dfs_dict = {}
for category in categories_list:
dfs_dict[category] = seed_df.copy()
replicas_df = | _pd.concat(dfs_dict, axis=1) | pandas.concat |
from pyspark.sql import SparkSession
from pyspark import SparkContext
import pyspark.ml as M
import pyspark.sql.functions as F
import pyspark.sql.types as T
import pandas as pd
import os
import numpy as np
import json
from tqdm import tqdm
from scipy import sparse
import psutil
from pathlib import Path
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, fbeta_score, accuracy_score, confusion_matrix
from src import *
FP_b = 'interim/b_features/*'
FP_m = 'interim/m_features/*'
FP_pram = os.path.join(ROOT_DIR, 'config/train-params.json')
FP_pram_test = os.path.join(ROOT_DIR, 'config/test-train.json')
def _preproc(test, FP_b, FP_m):
SparkContext.setSystemProperty('spark.executor.memory', '64g')
sc = SparkContext("local", "App Name")
sc.setLogLevel("ERROR")
spark = SparkSession(sc)
spark.conf.set('spark.ui.showConsoleProgress', True)
spark.conf.set("spark.sql.shuffle.partitions", NUM_WORKER)
if test:
fp_b = os.path.join(ROOT_DIR, 'data/tests', FP_b)
fp_m = os.path.join(ROOT_DIR, 'data/tests', FP_m)
else:
fp_b = os.path.join(ROOT_DIR, 'data/datasets', FP_b)
fp_m = os.path.join(ROOT_DIR, 'data/datasets', FP_m)
if test and os.path.exists(FP_pram_test):
files = json.load(open(FP_pram_test))
b_file = pd.Series(files['benign']).apply(lambda x: os.path.join(ROOT_DIR, 'data/tests/interim/b_features', x)).tolist()
m_file = pd.Series(files['malware']).apply(lambda x: os.path.join(ROOT_DIR, 'data/tests/interim/m_features', x)).tolist()
df_b = spark.read.format("csv").option("header", "true").load(b_file)
df_m = spark.read.format("csv").option("header", "true").load(m_file)
elif (not test) and os.path.exists(FP_pram):
files = json.load(open(FP_pram))
b_file = pd.Series(files['benign']).apply(lambda x: os.path.join(ROOT_DIR, 'data/datasets/interim/b_features', x)).tolist()
m_file = | pd.Series(files['malware']) | pandas.Series |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
def test_not_prefix_matches_suffix(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
def test_matches_suffix(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals( | pandas.Series([False, False, False, False, False]) | pandas.Series |
# -*- coding: utf-8 -*-
#!/usr/bin/python3
from abc import *
import pandas as pd
import json
from collections import OrderedDict
class ModuleComponentInterface(metaclass=ABCMeta):
def __init__(self):
self.__status = 0
self.fHandle = 0
self.attrib = OrderedDict({
"name" :"default",
"author" :"you",
"ver" :"0.0",
"id" :0,
"param" :"None",
"encode" :"utf-8",
"base" :0,
"last" :0,
"excl" :False,
"save_result":False,
"result" :None,
"flag" :0
})
def __del__(self):
pass
@property
def errno(self):
return self.__status
@property
def id(self):
return self.get_attrib("id",0)
def status(self,status):
if(type(status)==int):
self.__status = status
def update_attrib(self,key,value): # 모듈 속성 업데이트
self.attrib.update({key:value})
def cat_attrib(self): # 모듈 속성 보기
return json.dumps(self.attrib)
@abstractmethod
def module_open(self,id=2): # Reserved method for multiprocessing
self.__status = 0
self.attrib.update({"id":int(id)})
@abstractmethod
def module_close(self): # Reserved method for multiprocessing
pass
@abstractmethod
def set_attrib(self,key,value): # 모듈 호출자가 모듈 속성 변경/추가하는 method interface
self.update_attrib(key,value)
@abstractmethod
def get_attrib(self,key,value=None): # 모듈 호출자가 모듈 속성 획득하는 method interface
return self.attrib.get(key)
@abstractmethod
def execute(self,cmd=None,option=None): # 모듈 호출자가 모듈을 실행하는 method
pass
class MemFrameTool(object):
@staticmethod
def read_csv(csv,encoding='utf-8',error=False):
return pd.read_csv(csv,encoding=encoding,error_bad_lines=error).drop_duplicates()
@staticmethod
def read_xlsx(xlsx,sheet):
return | pd.read_excel(xlsx,sheet) | pandas.read_excel |
import streamlit as st
import pandas as pd
import requests
import os
from dotenv import load_dotenv
from nomics import Nomics
import json
import plotly
import yfinance as yf
import matplotlib.pyplot as plt
from PIL import Image
from fbprophet import Prophet
import hvplot as hv
import hvplot.pandas
import datetime as dt
from babel.numbers import format_currency
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from pandas.tseries.offsets import DateOffset
from sklearn.metrics import classification_report
from sklearn.ensemble import AdaBoostClassifier
import numpy as np
from tensorflow import keras
import plotly.express as px
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# 2 PERFORM EXPLORATORY DATA ANALYSIS AND VISUALIZATION
# Function to normalize stock prices based on their initial price
def normalize(df):
x = df.copy()
for i in x.columns[1:]:
x[i] = x[i]/x[i][0]
return x
# Function to plot interactive plots using Plotly Express
print("Function to plot interactive plots using Plotly Express")
def interactive_plot(df, title):
fig = px.line(title = title)
for i in df.columns[1:]:
fig.add_scatter(x = df['Date'], y = df[i], name = i)
fig.show()
# Function to concatenate the date, stock price, and volume in one dataframe
def individual_stock(price_df, vol_df, name):
return pd.DataFrame({'Date': price_df['Date'], 'Close': price_df[name], 'Volume': vol_df[name]})
# Load .env environment variables
load_dotenv()
## Page expands to full width
st.set_page_config(layout='wide')
image = Image.open('images/crypto_image.jpg')
st.image(image,width = 600)
# Header for main and sidebar
st.title( "Crypto Signal Provider Web App")
st.markdown("""This app displays top 10 cryptocurrencies by market cap.""")
st.caption("NOTE: USDT & USDC are stablecoins pegged to the Dollar.")
st.sidebar.title("Crypto Signal Settings")
# Get nomics api key
nomics_api_key = os.getenv("NOMICS_API_KEY")
nomics_url = "https://api.nomics.com/v1/prices?key=" + nomics_api_key
nomics_currency_url = ("https://api.nomics.com/v1/currencies/ticker?key=" + nomics_api_key + "&interval=1d,30d&per-page=10&page=1")
# Read API in json
nomics_df = pd.read_json(nomics_currency_url)
# Create an empty DataFrame for top cryptocurrencies by market cap
top_cryptos_df = pd.DataFrame()
# Get rank, crytocurrency, price, price_date, market cap
top_cryptos_df = nomics_df[['rank', 'logo_url', 'name', 'currency', 'price', 'price_date', 'market_cap']]
# This code gives us the sidebar on streamlit for the different dashboards
option = st.sidebar.selectbox("Dashboards", ('Top 10 Cryptocurrencies by Market Cap', 'Time-Series Forecasting - FB Prophet', "LSTM Model", 'Keras Model', 'Machine Learning Classifier - AdaBoost', 'Support Vector Machines', 'Logistic Regression'))
# Rename column labels
columns=['Rank', 'Logo', 'Currency', 'Symbol', 'Price (USD)', 'Price Date', 'Market Cap']
top_cryptos_df.columns=columns
# Set rank as index
top_cryptos_df.set_index('Rank', inplace=True)
# Convert text data type to numerical data type
top_cryptos_df['Market Cap'] = top_cryptos_df['Market Cap'].astype('int')
# Convert Timestamp to date only
top_cryptos_df['Price Date']=pd.to_datetime(top_cryptos_df['Price Date']).dt.date
# Replace nomics ticker symbol with yfinance ticker symbol
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("LUNA","LUNA1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("FTXTOKEN","FTT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("UNI","UNI1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("AXS2","AXS")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("SAND2","SAND")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HARMONY","ONE1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HELIUM","HNT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("GRT","GRT1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("IOT","MIOTA")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("BLOCKSTACK","STX")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("FLOW2","FLOW")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("BITTORRENT","BTT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("AMP2","AMP")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HOT","HOT1")
# Format Market Cap with commas to separate thousands
top_cryptos_df["Market Cap"] = top_cryptos_df.apply(lambda x: "{:,}".format(x['Market Cap']), axis=1)
# Formatting Price (USD) to currency
top_cryptos_df["Price (USD)"] = top_cryptos_df["Price (USD)"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
# Convert your links to html tags
def path_to_image_html(Logo):
return '<img src="'+ Logo +'" width=30 >'
# Pulls list of cryptocurrencies from nomics and concatenates to work with Yahoo Finance
coin = top_cryptos_df['Symbol'] + "-USD"
# Creates a dropdown list of cryptocurrencies based on top 100 list
dropdown = st.sidebar.multiselect("Select 1 coin to analyze", coin, default=['SOL-USD'])
# Create start date for analysis
start = st.sidebar.date_input('Start Date', value = pd.to_datetime('2020-01-01'))
# Create end date for analysis
end = st.sidebar.date_input('End Date', value = pd.to_datetime('today'))
# This option gives users the ability to view the current top 100 cryptocurrencies
if option == 'Top 10 Cryptocurrencies by Market Cap':
# Displays image in dataframe
top_cryptos_df.Logo = path_to_image_html(top_cryptos_df.Logo)
st.write(top_cryptos_df.to_html(escape=False), unsafe_allow_html=True)
st.text("")
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
coin_list['Ticker'] = coin_choice
coin_list.index=pd.to_datetime(coin_list.index).date
# Displays dataframe of selected cryptocurrency
st.subheader(f"Selected Crypto: {dropdown}")
st.dataframe(coin_list)
st.text("")
# Display coin_list into a chart
st.subheader(f'Selected Crypto Over Time: {dropdown}')
st.line_chart(coin_list['Adj Close'])
# This option gives users the ability to use FB Prophet
if option == 'Time-Series Forecasting - FB Prophet':
st.subheader("Time-Series Forecasting - FB Prophet")
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
coin_list['Ticker'] = coin_choice
# Reset the index so the date information is no longer the index
coin_list_df = coin_list.reset_index().filter(['Date','Adj Close'])
# Label the columns ds and y so that the syntax is recognized by Prophet
coin_list_df.columns = ['ds','y']
# Drop NaN values form the coin_list_df DataFrame
coin_list_df = coin_list_df.dropna()
# Call the Prophet function and store as an object
model_coin_trends = Prophet()
# Fit the time-series model
model_coin_trends.fit(coin_list_df)
# Create a future DataFrame to hold predictions
# Make the prediction go out as far as 60 days
periods = st.number_input("Enter number of prediction days", 30)
future_coin_trends = model_coin_trends.make_future_dataframe(periods = 30, freq='D')
# Make the predictions for the trend data using the future_coin_trends DataFrame
forecast_coin_trends = model_coin_trends.predict(future_coin_trends)
# Plot the Prophet predictions for the Coin trends data
st.markdown(f"Predictions Based on {dropdown} Trends Data")
st.pyplot(model_coin_trends.plot(forecast_coin_trends));
# Set the index in the forecast_coin_trends DataFrame to the ds datetime column
forecast_coin_trends = forecast_coin_trends.set_index('ds')
# View only the yhat,yhat_lower and yhat_upper columns in the DataFrame
forecast_coin_trends_df = forecast_coin_trends[['yhat', 'yhat_lower', 'yhat_upper']]
# From the forecast_coin_trends_df DataFrame, rename columns
coin_columns=['Most Likely (Average) Forecast', 'Worst Case Prediction', 'Best Case Prediction']
forecast_coin_trends_df.columns=coin_columns
forecast_coin_trends_df.index=pd.to_datetime(forecast_coin_trends_df.index).date
st.subheader(f'{dropdown} - Price Predictions')
st.dataframe(forecast_coin_trends_df)
st.subheader("Price Prediction with Daily Seasonality")
# Create the daily seasonality model
model_coin_seasonality = Prophet(daily_seasonality=True)
# Fit the model
model_coin_seasonality.fit(coin_list_df)
# Predict sales for ## of days out into the future
# Start by making a future dataframe
seasonality_periods = st.number_input("Enter number of future prediction days", 30)
coin_trends_seasonality_future = model_coin_seasonality.make_future_dataframe (periods=seasonality_periods, freq='D')
# Make predictions for each day over the next ## of days
coin_trends_seasonality_forecast = model_coin_seasonality.predict(coin_trends_seasonality_future)
seasonal_figs = model_coin_seasonality.plot_components(coin_trends_seasonality_forecast)
st.pyplot(seasonal_figs)
# This option gives users the ability to use Keras Model
if option == 'Keras Model':
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
# Preparing the data
# Displays dataframe of selected cryptocurrency. Isolated columns as trading features for forecasting cryptocurrency.
st.subheader(f"Keras Model")
st.subheader(f"Selected Crypto: {dropdown}")
coin_training_df = coin_list
coin_training_df.index=pd.to_datetime(coin_training_df.index).date
st.dataframe(coin_training_df)
# Define the target set y using "Close" column
y = coin_training_df["Close"]
# Define the features set for X by selecting all columns but "Close" column
X = coin_training_df.drop(columns=["Close"])
# Split the features and target sets into training and testing datasets
# Assign the function a random_state equal to 1
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=1)
# Create a StandardScaler instance
scaler = StandardScaler()
# Fit the scaler to teh features training dataset
X_scaler = scaler.fit(X_train)
# Fit the scaler to the features training dataset
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Define the number of inputs (features) to the model
number_input_features = len(X_train.iloc[0])
# Define the number of neurons in the output layer
st.write("Create Network:")
number_output_neurons = st.number_input("Enter number of neurons in output layer", 1)
# Define the number of hidden nodes for the first hidden layer
hidden_nodes_layer1 = (number_input_features + number_output_neurons)//2
# Define the number of hidden noes for the second hidden layer
hidden_nodes_layer2 = (hidden_nodes_layer1 + number_output_neurons)//2
# Create the Sequential model instance
nn = Sequential()
# User selects activation for 1st hidden layer
first_activation = st.selectbox("Choose 1st hidden layer activation function", ('relu','sigmoid', 'tanh'))
# Add the first hidden layer
nn.add(Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation=first_activation))
# User selects activation for 2nd hidden layer
second_activation = st.selectbox("Choose 2nd hidden layer activation function", ('relu',' '))
# Add the second hidden layer
nn.add(Dense(units=hidden_nodes_layer2,activation=second_activation))
# User selects activation for output layer
output_activation = st.selectbox("Choose output layer activation function", ('sigmoid',' '))
# Add the output layer to the model specifying the number of output neurons and activation function
nn.add(Dense(units=number_output_neurons, activation=output_activation))
# Define functions
loss = st.selectbox("Choose loss function", ('binary_crossentropy',' '))
optimizer = st.selectbox("Choose optimizer", ('adam',' '))
metrics = st.selectbox("Choose evaluation metric", ('accuracy',' '))
# Compile the Sequential model
nn.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
# Fit the model using 50 epochs and the training data
epochs = st.number_input("Enter number of epochs", 10)
epochs = int(epochs)
fit_model=nn.fit(X_train_scaled, y_train, epochs=epochs)
# Evaluate the model loss and accuracy metrics using the evaluate method and the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose =2)
# Display the model loss and accuracy results
st.write(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# User selects AdaBoost
if option == 'Machine Learning Classifier - AdaBoost':
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
#coin_list['Ticker'] = coin_choice # This tests that the selected ticker is displayed in the DataFrame
# Preparing the data
# Displays dataframe of selected cryptocurrency. Isolated columns as trading features for forecasting cryptocurrency.
st.subheader(f"AdaBoost")
st.subheader(f"Selected Crypto: {dropdown}")
coin_yf_df = coin_list.drop(columns='Adj Close')
coin_yf_df.index=pd.to_datetime(coin_yf_df.index).date
st.dataframe(coin_yf_df)
# Filter the date index and close columns
coin_signals_df = coin_yf_df.loc[:,["Close"]]
# Use the pct_change function to generate returns from close prices
coin_signals_df["Actual Returns"] = coin_signals_df["Close"].pct_change()
# Drop all NaN values from the DataFrame
coin_signals_df = coin_signals_df.dropna()
# Set the short window and long window
short_window = st.number_input("Set a short window:", 2)
short_window = int(short_window)
long_window = st. number_input("Set a long window:", 10)
long_window = int(long_window)
# Generate the fast and slow simple moving averages
coin_signals_df["SMA Fast"] = coin_signals_df["Close"].rolling(window=short_window).mean()
coin_signals_df["SMA Slow"] = coin_signals_df["Close"].rolling(window=long_window).mean()
coin_signals_df = coin_signals_df.dropna()
# Initialize the new Signal Column
coin_signals_df['Signal'] = 0.0
coin_signals_df['Signal'] = coin_signals_df['Signal']
# When Actual Returns are greater than or equal to 0, generate signal to buy stock long
coin_signals_df.loc[(coin_signals_df["Actual Returns"] >= 0), "Signal"] = 1
# When Actual Returns are less than 0, generate signal to sell stock short
coin_signals_df.loc[(coin_signals_df["Actual Returns"] < 0), "Signal"] = -1
# Calculate the strategy returns and add them to the coin_signals_df DataFrame
coin_signals_df["Strategy Returns"] = coin_signals_df["Actual Returns"] * coin_signals_df["Signal"].shift()
# Plot Strategy Returns to examine performance
st.write(f"{dropdown} Performance by Strategy Returns")
st.line_chart ((1 + coin_signals_df['Strategy Returns']).cumprod())
# Split data into training and testing datasets
# Assign a copy of the sma_fast and sma_slow columns to a features DataFrame called X
X = coin_signals_df[['SMA Fast', 'SMA Slow']].shift().dropna()
# Create the target set selecting the Signal column and assigning it to y
y = coin_signals_df["Signal"]
st.subheader("Training Model")
# Select the start of the training period
st.caption(f'Training Begin Date starts at the selected "Start Date": {start}')
training_begin = X.index.min()
# Select the ending period for the trianing data with an offet timeframe
months = st.number_input("Enter number of months for DateOffset", 1)
training_end = X.index.min() + DateOffset(months=months)
st.caption(f'Training End Date ends: {training_end}')
# Generate the X_train and y_train DataFrame
X_train = X.loc[training_begin:training_end]
y_train = y.loc[training_begin:training_end]
# Generate the X_test and y_test DataFrames
X_test = X.loc[training_end+DateOffset(days=1):]
y_test = y.loc[training_end+DateOffset(days=1):]
# Scale the features DataFrame
# Create a StandardScaler instance
scaler = StandardScaler()
# Apply the scaler model to fit the X_train data
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrame using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Initiate the AdaBoostClassifier model instance
ab_model = AdaBoostClassifier()
# Fit the model using the training data
ab_model.fit(X_train,y_train)
# Use the testing dataset to generate the predictions for the new model
ab_y_pred = ab_model.predict(X_test)
# Backtest the AdaBoost Model to evaluate performance
st.write('**AdaBoost Testing Classification Report**')
ab_testing_report = classification_report(y_test,ab_y_pred)
# Print the classification report
st.write(ab_testing_report)
# Create a new empty predictions DataFrame.
# Create a predictions DataFrame
alt_predictions_df = pd.DataFrame(index=X_test.index)
# Add the SVM model predictions to the DataFrame
alt_predictions_df['Predicted'] = ab_y_pred
# Add the actual returns to the DataFrame
alt_predictions_df['Actual Returns'] = coin_signals_df['Actual Returns']
# Add the strategy returns to the DataFrame
alt_predictions_df['Strategy Returns'] = (alt_predictions_df['Actual Returns'] * alt_predictions_df['Predicted'])
st.subheader(f"Predictions: {dropdown}")
st.dataframe(alt_predictions_df)
st.subheader(f"Actual Returns vs. Strategy Returns")
st.line_chart((1 + alt_predictions_df[['Actual Returns','Strategy Returns']]).cumprod())
#### SUPPORT VECTOR MACHINES ####
# This option gives users the ability to use SVM model
if option == 'Support Vector Machines':
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
# Displays dataframe of selected cryptocurrency.
st.subheader(f'Support Vector Machines')
st.subheader(f"Selected Crypto: {dropdown}")
coin_yf_df = coin_list.drop(columns='Adj Close')
coin_yf_df_copy = coin_yf_df
coin_yf_df.index=pd.to_datetime(coin_yf_df.index).date
st.dataframe(coin_yf_df)
# Filter the date index and close columns
coin_signals_df = coin_yf_df_copy.loc[:,["Close"]]
# Use the pct_change function to generate returns from close prices
coin_signals_df["Actual Returns"] = coin_signals_df["Close"].pct_change()
# Drop all NaN values from the DataFrame
coin_signals_df = coin_signals_df.dropna()
# Generate the fast and slow simple moving averages (user gets to select window size)
short_window = st.number_input("Set a short window:", 4)
short_window = int(short_window)
long_window = st. number_input("Set a long window:", 85)
long_window = int(long_window)
# Generate the fast and slow simple moving averages
coin_signals_df["SMA Fast"] = coin_signals_df["Close"].rolling(window=short_window).mean()
coin_signals_df["SMA Slow"] = coin_signals_df["Close"].rolling(window=long_window).mean()
coin_signals_df = coin_signals_df.dropna()
# Initialize the new Signal Column
coin_signals_df['Signal'] = 0.0
coin_signals_df['Signal'] = coin_signals_df['Signal']
# When Actual Returns are greater than or equal to 0, generate signal to buy stock long
coin_signals_df.loc[(coin_signals_df["Actual Returns"] >= 0), "Signal"] = 1
# When Actual Returns are less than 0, generate signal to sell stock short
coin_signals_df.loc[(coin_signals_df["Actual Returns"] < 0), "Signal"] = -1
# Calculate the strategy returns and add them to the coin_signals_df DataFrame
coin_signals_df["Strategy Returns"] = coin_signals_df["Actual Returns"] * coin_signals_df["Signal"].shift()
# Plot Strategy Returns to examine performance
st.write(f"{dropdown} Performance by Strategy Returns")
st.line_chart ((1 + coin_signals_df['Strategy Returns']).cumprod())
# Assign a copy of the sma_fast and sma_slow columns to a features DataFrame called X
svm_X = coin_signals_df[['SMA Fast', 'SMA Slow']].shift().dropna()
# Create the target set selecting the Signal column and assigning it to y
svm_y = coin_signals_df["Signal"]
# Select the start of the training period
svm_training_begin = svm_X.index.min()
#### Setting the training data to three or above 3 seems to throw off callculations where one signal or the other isnt predicted ####
#### Is there a way to make this a selection the user makes? ####
# Select the ending period for the training data with an offset of 2 months
months = st.number_input("Enter number of months for DateOffset", 2)
svm_training_end = svm_X.index.min() + DateOffset(months=months)
st.caption(f'Training End Date ends: {svm_training_end}')
# Generate the X_train and y_train DataFrame
svm_X_train = svm_X.loc[svm_training_begin:svm_training_end]
svm_y_train = svm_y.loc[svm_training_begin:svm_training_end]
# Generate the X_test and y_test DataFrames
svm_X_test = svm_X.loc[svm_training_end+DateOffset(days=1):]
svm_y_test = svm_y.loc[svm_training_end+DateOffset(days=1):]
# Scale the features DataFrame with StandardScaler
svm_scaler = StandardScaler()
# Apply the scaler model to fit the X_train data
svm_X_scaler = svm_scaler.fit(svm_X_train)
# Transform the X_train and X_test DataFrame using the X_scaler
svm_X_train_scaled = svm_X_scaler.transform(svm_X_train)
svm_X_test_scaled = svm_X_scaler.transform(svm_X_test)
## From SVM, instantiate SVC classifier model instance
svm_model = svm.SVC()
# Fit the model with the training data
svm_model.fit(svm_X_train,svm_y_train)
# Use the testing dataset to generate the predictions
svm_y_pred = svm_model.predict(svm_X_test)
# Use a classification report to evaluate the model using the predictions and testing data
st.write('**Support Vector Machines Classification Report**')
svm_testing_report = classification_report(svm_y_test,svm_y_pred)
# Print the classification report
st.write(svm_testing_report)
# Create a predictions DataFrame
svm_predictions_df = pd.DataFrame(index=svm_X_test.index)
# Add the SVM model predictions to the DataFrame
svm_predictions_df['Predicted'] = svm_y_pred
# Add the actual returns to the DataFrame
svm_predictions_df['Actual Returns'] = coin_signals_df['Actual Returns']
# Add the strategy returns to the DataFrame
svm_predictions_df['Strategy Returns'] = (svm_predictions_df['Actual Returns'] * svm_predictions_df['Predicted'])
st.subheader(f"Predictions: {dropdown}")
st.dataframe(svm_predictions_df)
st.subheader(f"Actual Returns vs. Strategy Returns")
st.line_chart((1 + svm_predictions_df[['Actual Returns','Strategy Returns']]).cumprod())
#### LSTM Model ####
# This option gives users the ability to use LSTM model
if option == 'LSTM Model':
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
# Preparing the data
# Displays dataframe of selected cryptocurrency. Isolated columns as trading features for forecasting cryptocurrency.
st.subheader(f"LSTM Model")
st.subheader(f"Selected Crypto: {dropdown}")
coin_training_df = coin_list#[["Close", "High", "Low", "Open", "Volume"]]
coin_training_df.index=pd.to_datetime(coin_training_df.index).date
coin_training_df["Date"]= | pd.to_datetime(coin_training_df.index) | pandas.to_datetime |
import pandas
from skbio.stats.composition import clr
from scipy.stats import mannwhitneyu
from scipy.stats import kruskal
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
df=pandas.read_csv("./../../../Data_21Dec20/species_data.csv",index_col=0)
y1=pandas.read_csv("./../../../METADATA/data_194.csv",index_col=0)
y2= | pandas.read_csv("./../../../METADATA/data_test.csv",index_col=0) | pandas.read_csv |
from eemeter.io.serializers import ArbitraryEndSerializer
from datetime import datetime
import pandas as pd
import numpy as np
import pytz
import pytest
@pytest.fixture
def serializer():
return ArbitraryEndSerializer()
def test_no_records(serializer):
df = serializer.to_dataframe([])
assert df.empty
assert all(df.columns == ["value", "estimated"])
def test_single_valid_record(serializer):
records = [
{
"start": datetime(2000, 1, 1, tzinfo=pytz.UTC),
"end": datetime(2000, 1, 2, tzinfo=pytz.UTC),
"value": 1,
},
]
df = serializer.to_dataframe(records)
assert df.value[datetime(2000, 1, 1, tzinfo=pytz.UTC)] == 1
assert not df.estimated[datetime(2000, 1, 1, tzinfo=pytz.UTC)]
assert pd.isnull(df.value[datetime(2000, 1, 2, tzinfo=pytz.UTC)])
assert not df.estimated[datetime(2000, 1, 2, tzinfo=pytz.UTC)]
def test_single_valid_record_end_only(serializer):
records = [
{
"end": datetime(2000, 1, 2, tzinfo=pytz.UTC),
"value": 1,
},
]
df = serializer.to_dataframe(records)
assert pd.isnull(df.value[datetime(2000, 1, 2, tzinfo=pytz.UTC)])
assert not df.estimated[datetime(2000, 1, 2, tzinfo=pytz.UTC)]
def test_single_valid_record_with_estimated(serializer):
records = [
{
"start": datetime(2000, 1, 1, tzinfo=pytz.UTC),
"end": datetime(2000, 1, 2, tzinfo=pytz.UTC),
"value": 1,
"estimated": True,
},
]
df = serializer.to_dataframe(records)
assert df.value[datetime(2000, 1, 1, tzinfo=pytz.UTC)] == 1
assert df.estimated[datetime(2000, 1, 1, tzinfo=pytz.UTC)]
assert pd.isnull(df.value[datetime(2000, 1, 2, tzinfo=pytz.UTC)])
assert not df.estimated[datetime(2000, 1, 2, tzinfo=pytz.UTC)]
def test_record_no_end(serializer):
records = [
{
"value": 1,
},
]
with pytest.raises(ValueError):
serializer.to_dataframe(records)
def test_record_no_value(serializer):
records = [
{
"end": datetime(2000, 1, 2, tzinfo=pytz.UTC),
},
]
with pytest.raises(ValueError):
serializer.to_dataframe(records)
def test_multiple_records(serializer):
records = [
{
"end": datetime(2000, 1, 1, tzinfo=pytz.UTC),
"value": 1,
},
{
"end": datetime(2000, 1, 2, tzinfo=pytz.UTC),
"value": 2,
},
]
df = serializer.to_dataframe(records)
assert df.value[datetime(2000, 1, 1, tzinfo=pytz.UTC)] == 2
assert not df.estimated[datetime(2000, 1, 1, tzinfo=pytz.UTC)]
assert pd.isnull(df.value[datetime(2000, 1, 2, tzinfo=pytz.UTC)])
assert not df.estimated[datetime(2000, 1, 2, tzinfo=pytz.UTC)]
def test_record_end_before_start(serializer):
records = [
{
"start": datetime(2000, 1, 2, tzinfo=pytz.UTC),
"end": datetime(2000, 1, 1, tzinfo=pytz.UTC),
"value": 1,
},
]
with pytest.raises(ValueError):
serializer.to_dataframe(records)
def test_to_records(serializer):
data = {"value": [1, np.nan], "estimated": [True, False]}
columns = ["value", "estimated"]
index = pd.date_range('2000-01-01', periods=2, freq='D')
df = | pd.DataFrame(data, index=index, columns=columns) | pandas.DataFrame |
import pandas as pd
def strip_chars(subdomain_df):
"""Strip *.'s and www.'s"""
subdomain_df = subdomain_df.str.lower()
subdomain_df = subdomain_df.str.replace("www.", "")
subdomain_df = subdomain_df.str.replace("\*.", "")
return subdomain_df
def filter_domain(subdomain_df, domain):
"""Filter subdomains that don't match the domain name."""
subdomain_df = subdomain_df[subdomain_df.str.endswith(f".{domain}")]
return subdomain_df
def reindex_df(subdomain_df):
"""Reindex subdomain dataframe and starts at indice 1."""
subdomain_df.drop_duplicates(inplace=True)
subdomain_df.reset_index(drop=True, inplace=True)
subdomain_df.index += 1
return subdomain_df
def concat_dfs(subdomain_dfs, headers):
"""Concatenate, fill N/A's and rename columns."""
concat_df = | pd.concat(subdomain_dfs, axis="columns") | pandas.concat |
import pathlib
from pathlib import Path
from typing import Union, Tuple
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
"""Initiate QuestionnaireAnalysis class with following arguments.
Arguments:
data_fname {Union[pathlib.Path, str]} -- [path to .json file ocntaining subjects' data]
"""
self.data_fname = Path(data_fname)
if not self.data_fname.is_file():
raise ValueError
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data = | pd.read_json(self.data_fname) | pandas.read_json |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
load message and categories data from csv files and merge them
parameters:
messages_filepath: file of mesaage.csv
categories_filepath: file path of categories.csv
return:
df : a dataframe with merged columns
"""
messages = pd.read_csv(messages_filepath)
messages.head()
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
#!/usr/bin/env python
from __future__ import print_function
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
import pysam
import numpy as np
from collections import defaultdict
import seaborn as sns
import pandas as pd
import sys
import re
from builtins import zip, range, map
if sys.version_info < (3, 0):
import string
complement = string.maketrans('ACTGN','TGACN')
else:
complement = str.maketrans('ACTGN','TGACN')
def reverse_complement(seq):
return seq.translate(complement)[::-1]
def norm_data(d):
d['base_fraction'] = np.true_divide(d['base_count'], d.base_count.sum())
return d
def make_dataframe(nucleotide_dict, end):
return pd.DataFrame.from_dict(nucleotide_dict[end], orient = 'index') \
.reset_index() \
.rename(columns = {'index':'positions'})\
.assign(read_end = end) \
.pipe(pd.melt, id_vars = ['positions','read_end'],
value_name = 'base_count', var_name='base')\
.groupby(['read_end','positions']) \
.apply(norm_data) \
.reset_index()\
.fillna(0) \
.query('base != "N"')\
.drop('index',axis=1)
def plot_ends(df, figurename):
positions_consider = df.positions.max()
with sns.plotting_context('paper',font_scale = 1.2), \
sns.color_palette("husl", 8):
p = sns.FacetGrid(data = df, col = 'read_end',
hue = 'base', aspect = 1.5)
p.map(plt.plot, 'positions','base_fraction')
xt = range(1, positions_consider + 1, 2)
for i, ax in enumerate(p.fig.axes):
ax.set_xticks(xt)
ax.set_xticklabels(xt, rotation=90)
p.add_legend()
p.set_titles('{col_name}')
p.set_axis_labels('Positions','Fraction')
p.savefig(figurename, transparent=True)
print('Written %s ' %figurename)
return 0
def good_cigar(cigar):
cigar = str(cigar)
return re.findall('[MHSID]',cigar) == ['M']
def extract_nucleotides(bam, positions_consider):
end_nucleotide_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
positions = range(positions_consider)
for count, aln in enumerate(bam):
condition_1 = (not aln.is_unmapped and not aln.is_supplementary and not aln.is_secondary)
condition_2 = (not aln.is_duplicate and aln.mapping_quality > 1)
condition_3 = good_cigar(aln.cigarstring)
if condition_1 and condition_2:# and condition_3:
#sequence = str(aln.query_alignment_sequence)
read = "5'" if aln.is_read1 else "3'"
sequence = str(aln.query_sequence)
if aln.is_reverse:
sequence = reverse_complement(sequence)[:positions_consider]
else:
sequence = sequence[:positions_consider]
for pos, base in zip(positions, sequence):
end_nucleotide_dict[read][pos][base] += 1
if count % 10000000 == 0:
print('Parsed %i alignments' %(count))
return end_nucleotide_dict
def main():
if len(sys.argv) != 3:
sys.exit('[usage] python %s <bamfile> <outprefix>' %(sys.argv[0]))
positions_consider = 15
bam_file = sys.argv[1]
outprefix = sys.argv[2]
figurename = outprefix + '.pdf'
tablename = outprefix + '.csv'
with pysam.Samfile(bam_file,'rb') as bam:
end_nucleotide_dict = extract_nucleotides(bam, positions_consider)
df = pd.concat([make_dataframe(end_nucleotide_dict, end) for end in ["5'","3'"]])
df.to_csv(tablename, index=False)
df = | pd.read_csv(tablename) | pandas.read_csv |
# Import necessary packages
import pandas as pd
pd.options.display.max_columns = None
import numpy as np
import math
import geopandas as gp
from shapely.geometry import Point, mapping, LineString
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
import dask.distributed
from datetime import datetime
import warnings
import os
warnings.filterwarnings("ignore")
#Define WGS 1984 coordinate system
wgs84 = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84', 'no_defs': True}
#Define NAD 1983 StatePlane California III
cal3 = {'proj': 'lcc +lat_1=37.06666666666667 +lat_2=38.43333333333333 +lat_0=36.5 +lon_0=-120.5 +x_0=2000000 +y_0=500000.0000000002', 'ellps': 'GRS80', 'datum': 'NAD83', 'no_defs': True}
# UK Paths
# MAIN_DIR = 'S:/CMP/Transit/Speed'
# NETCONF_DIR = 'S:/CMP/Network Conflation'
# APC_FILE = 'S:/CMP/Transit/Speed/APC_2019_SPRING/APC_2019_SPRING.txt'
# SFCTA Paths
MAIN_DIR = r'Q:\CMP\LOS Monitoring 2019\Transit\Speed'
NETCONF_DIR = r'Q:\CMP\LOS Monitoring 2019\Network_Conflation'
APC_FILE = r'Q:\Data\Observed\Transit\Muni\APC\CMP_2019\APC_2019_SPRING.txt'
# Convert original coordinate system to state plane
stops = gp.read_file(os.path.join(MAIN_DIR, 'stops.shp'))
stops = stops.to_crs(cal3)
stops['stop_name'] = stops['stop_name'].str.lower()
stops[['street_1','street_2']] = stops.stop_name.str.split('&', expand=True) #split stop name into operating street and intersecting street
# CMP network
cmp_segs_org=gp.read_file(os.path.join(NETCONF_DIR, 'cmp_roadway_segments.shp'))
cmp_segs_prj = cmp_segs_org.to_crs(cal3)
cmp_segs_prj['cmp_name'] = cmp_segs_prj['cmp_name'].str.replace('/ ','/')
cmp_segs_prj['cmp_name'] = cmp_segs_prj['cmp_name'].str.lower()
cmp_segs_prj['Length'] = cmp_segs_prj.geometry.length
cmp_segs_prj['Length'] = cmp_segs_prj['Length'] * 3.2808 #meters to feet
# INRIX network
inrix_net=gp.read_file(os.path.join(NETCONF_DIR, 'inrix_xd_sf.shp'))
inrix_net['RoadName'] = inrix_net['RoadName'].str.lower()
cmp_inrix_corr = pd.read_csv(os.path.join(NETCONF_DIR, 'CMP_Segment_INRIX_Links_Correspondence.csv'))
# Create a buffer zone for each cmp segment
ft=160 # According to the memo from last CMP cycle
mt=round(ft/3.2808,4)
stops_buffer=stops.copy()
stops_buffer['geometry'] = stops_buffer.geometry.buffer(mt)
#stops_buffer.to_file(os.path.join(MAIN_DIR, 'stops_buffer.shp'))
# cmp segments intersecting transit stop buffer zone
cmp_segs_intersect=gp.sjoin(cmp_segs_prj, stops_buffer, op='intersects').reset_index()
stops['near_cmp'] = 0
cmp_segs_intersect['name_match']=0
for stop_idx in range(len(stops)):
stop_id = stops.loc[stop_idx, 'stop_id']
stop_geo = stops.loc[stop_idx]['geometry']
stop_names = stops.loc[stop_idx]['street_1'].split('/')
if 'point lobos' in stop_names:
stop_names = stop_names + ['geary']
if stop_id == 7357:
stop_names = stop_names + ['third st']
cmp_segs_int = cmp_segs_intersect[cmp_segs_intersect['stop_id']==stop_id]
cmp_segs_idx = cmp_segs_intersect.index[cmp_segs_intersect['stop_id']==stop_id].tolist()
near_dis = 5000
if ~cmp_segs_int.empty:
for seg_idx in cmp_segs_idx:
cmp_seg_id = cmp_segs_int.loc[seg_idx, 'cmp_segid']
cmp_seg_geo = cmp_segs_int.loc[seg_idx]['geometry']
cmp_seg_names = cmp_segs_int.loc[seg_idx]['cmp_name'].split('/')
if 'bayshore' in cmp_seg_names:
cmp_seg_names = cmp_seg_names + ['bay shore']
if '3rd st' in cmp_seg_names:
cmp_seg_names = cmp_seg_names + ['third st']
if '19th ave' in cmp_seg_names:
cmp_seg_names = cmp_seg_names + ['19th avenue']
if 'geary' in cmp_seg_names:
cmp_seg_names = cmp_seg_names + ['point lobos']
# Add INRIX street name to be comprehensive
inrix_links = cmp_inrix_corr[cmp_inrix_corr['CMP_SegID']==cmp_seg_id]
inrix_link_names = inrix_net[inrix_net['SegID'].isin(inrix_links['INRIX_SegID'])]['RoadName'].tolist()
inrix_link_names = list(filter(None, inrix_link_names))
if len(inrix_link_names) > 0:
inrix_link_names = list(set(inrix_link_names))
cmp_seg_link_names = cmp_seg_names + inrix_link_names
else:
cmp_seg_link_names = cmp_seg_names
matched_names= [stop_name for stop_name in stop_names if any(cname in stop_name for cname in cmp_seg_link_names)]
if len(matched_names)>0:
stops.loc[stop_idx, 'near_cmp']=1
cmp_segs_intersect.loc[seg_idx, 'name_match']=1
cur_dis = stop_geo.distance(cmp_seg_geo)
cmp_segs_intersect.loc[seg_idx, 'distance']=cur_dis
near_dis = min(near_dis, cur_dis)
stops.loc[stop_idx, 'near_dis']=near_dis
stops_near_cmp = stops[stops['near_cmp']==1]
stops_near_cmp = stops_near_cmp.to_crs(wgs84)
stops_near_cmp_list = stops_near_cmp['stop_id'].unique().tolist()
cmp_segs_near = cmp_segs_intersect[cmp_segs_intersect['name_match']==1]
# Remove mismatched stops determined by manual QAQC review
remove_cmp_stop = [(175, 5546), (175, 5547), (175, 7299), (175, 5544),
(66, 7744),
(214, 7235),
(107, 4735),
(115, 4275),
(143, 4824),
(172, 5603)]
for remove_idx in range(len(remove_cmp_stop)):
rmv_cmp_id = remove_cmp_stop[remove_idx][0]
rmv_stop_id = remove_cmp_stop[remove_idx][1]
remove_df_idx = cmp_segs_near.index[(cmp_segs_near['cmp_segid']==rmv_cmp_id) & (cmp_segs_near['stop_id']==rmv_stop_id)].tolist()[0]
cmp_segs_near = cmp_segs_near.drop([remove_df_idx], axis=0)
# Preprocess APC data
apc_fields = ['EXT_TRIP_ID', 'DIRECTION', 'ACTUAL_DATE', 'VEHICLE_ID', 'CALC_SPEED',
'REV_DISTANCE', 'OPEN_DATE_TIME', 'DWELL_TIME', 'CLOSE_DATE_TIME', 'STOPID']
apc_cmp = | pd.read_csv(APC_FILE, sep='\t', usecols=apc_fields) | pandas.read_csv |
""" Module contains functions to retrieve
and process data from the database folder"""
import pandas as pd
import datetime
def pre_master_data(master_df):
"""
Get all dataset from the database and combine them to one dataframe.
(data pre-processing)
:param master_df: filename
:type master_df: csv file
:return dataframe contains all of the datasets
"""
df = master_df.copy()
df.rename(columns={'timestamp_eastern': 'ts'}, inplace=True)
df['ts'] = pd.to_datetime(df['ts'])
cols_to_keep = ['season', 'adjusted_demand_MW', 'demand_MW', 'hour_ending_eastern', 'ts']
df = df[cols_to_keep]
df['ts'] = | pd.to_datetime(df['ts']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import math
import warnings
import scipy.stats as st
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from .utils import *
LULC_COLORS_DEFAULT = pd.DataFrame(
columns=['lulc', 'color'],
data=[
['Pasture','#ffc100'],
['Annual crop','#D5A6BD'],
['Tree plantation','#935132'],
['Semi-perennial crop','#C27BA0'],
['Urban infrastructure','#af2a2a'],
['Wetland','#18b08d'],
['Grassland formation','#B8AF4F'],
['Forest formation','#006400'],
['Savanna formation','#32CD32'],
['Water','#0000FF'],
['Other','#5f5f5f'],
['Perennial crop','#D5A6BD'],
['Other non-forest natural formation','#BDB76B']
]
)
class Area_Estimator:
def __init__(self, samples, weight_col, strata_col, lulc_col, id_col,
year_col, pixel_size, confidence_interval, lulc_colors=None, verbose = True):
self.samples = samples
self.weight_col = weight_col
self.lulc_col = lulc_col
self.id_col = id_col
self.year_col = year_col
self.pixel_size = pixel_size
self.strata_col = strata_col
self.verbose = verbose
self.confidence_interval = confidence_interval
# Density points according the confidence interval
self.std_norm = round(st.norm.ppf(1 - ( 1 - confidence_interval ) / 2), 2)
# Area in hectares
self.pixel_area = (self.pixel_size * self.pixel_size) / 10000
self.lulc_list = self._unique_vals(self.lulc_col)
self.year_list = self._unique_vals(self.year_col)
# Min and max years for the lulc change analysis
self.min_year = self.samples[self.year_col].min()
self.max_year = self.samples[self.year_col].max()
if lulc_colors is None:
self.lulc_colors = LULC_COLORS_DEFAULT
else:
self.lulc_colors = lulc_colors
def _unique_vals(self, column):
return list(self.samples[column].unique())
def _verbose(self, *args, **kwargs):
if self.verbose:
ttprint(*args, **kwargs)
def _population(self, samples):
population = (1 * samples[self.weight_col]).sum()
return population, (population * self.pixel_area)
def _calc_se(self, samples, mask, population):
def _strata_variance(samples_strata, var_map_correct_s):
nsamples_s, _ = samples_strata.shape
population_s = (1 * samples_strata[self.weight_col]).sum()
strata_var = 0
if population_s > 0:
strata_var = math.pow(population_s,2) \
* (1 - nsamples_s / population_s) \
* var_map_correct_s / nsamples_s
return strata_var
args = []
var_map_correct_s = np.var(mask.astype('int'))
for name, samples_strata in samples.groupby(self.strata_col):
args.append((samples_strata, var_map_correct_s))
glob_var = 0
for strata_var in do_parallel(_strata_variance, args, backend='threading'):
glob_var += strata_var
glob_var = 1 / math.pow(population, 2) * glob_var
glob_se = self.std_norm * math.sqrt(glob_var)
return glob_se
def _calc_area(self, samples, year, value_col, value_list, region_label):
result = []
for value in value_list:
try:
lulc_mask = (samples[value_col] == value)
samples.loc[:, 'ESTIMATOR'] = 0
samples.loc[lulc_mask, 'ESTIMATOR'] = 1
population, area_population = self._population(samples)
lulc_proportion = ((samples['ESTIMATOR'] * samples[self.weight_col]).sum()) / population
lulc_se = self._calc_se(samples, lulc_mask, population)
lulc_area = lulc_proportion * area_population
result.append([value, lulc_area, lulc_proportion, lulc_se, year, region_label])
except:
self._verbose(f'_calc_area ERROR for value_col={value_col} value={value}, ' + \
'year={year}, region_label={region_label} ')
continue
return result
def _filter_samples(self, region_filter = None):
return self.samples if (region_filter is None) else self.samples[region_filter]
def _valid_year_range(self, year, n_years, backward = False):
step = -1 if backward else 1
start_year = year + step
end_year = year + (n_years * step) + 1
end_year = self.min_year - 1 if end_year < self.min_year else end_year
end_year = self.max_year + 1 if end_year > self.max_year else end_year
#if start_year == end_year:
# return [ year ]
#else:
result =[ y for y in range(start_year, end_year, step) ]
return result
def _change_mask(self, samples, lulc_arr, year, past_arr, past_nyears, future_arr, future_nyears):
past_years = self._valid_year_range(year, past_nyears, backward = True)
future_years = self._valid_year_range(year, future_nyears, backward = False)
# Considering all the samples
past_mask = np.logical_and(
self.samples[self.lulc_col].isin(past_arr),
self.samples[self.year_col].isin(past_years)
)
#print(past_arr, past_years, np.unique(past_mask, return_counts=True))
# Considering all the samples
future_mask = np.logical_and(
self.samples[self.lulc_col].isin(future_arr),
self.samples[self.year_col].isin(future_years)
)
past_fur_mask = np.logical_or(past_mask, future_mask)
n_years = len(past_years) + len(future_years)
# Considering all the samples
samples_ids = self.samples[[self.id_col]].copy()
samples_ids['past_fur_mask'] = 0
samples_ids['past_fur_mask'].loc[past_fur_mask] = 1
past_fur_agg = samples_ids[past_fur_mask][['id', 'past_fur_mask']].groupby('id').sum()
past_fur_ids = past_fur_agg[past_fur_agg['past_fur_mask'] == n_years].index
# Considering samples passed as params
change_mask = np.logical_and(
samples[self.lulc_col].isin(lulc_arr),
samples[self.id_col].isin(past_fur_ids)
)
#print('change_mask', samples.shape)
change_mask = np.logical_and(change_mask, samples[self.year_col] == year)
#print(np.unique(change_mask, return_counts=True))
return change_mask
def lulc(self, lulc = None, year = None, region_label = 'Brazil', region_filter = None):
args = []
_samples = self._filter_samples(region_filter)
_lulc_list = self.lulc_list if (lulc is None) else [lulc]
_year_list = self.year_list if (year is None) else [year]
result = []
self._verbose(f'Estimating area of {len(_lulc_list)} LULC classes for {region_label} ({len(_year_list)} years)')
for _year in _year_list:
year_samples = _samples[_samples[self.year_col] == _year]
args.append((year_samples, _year, self.lulc_col, _lulc_list, region_label))
result = []
for year_result in do_parallel(self._calc_area, args):
result += year_result
self._verbose(f'Finished')
result = pd.DataFrame(result, columns=['lulc', 'area_ha', 'proportion', 'se', 'year', 'region'])
result = result.merge(self.lulc_colors, on='lulc')
return result
def lulc_by_region(self, region_col, lulc = None, year = None):
result = []
for region in self._unique_vals(region_col):
result.append(
self.lulc(lulc=lulc, year=year, region_label=region, region_filter=(self.samples[region_col] == region))
)
return pd.concat(result)
def lulc_change(self, lulc_change_label, lulc_arr, past_arr, past_nyears, future_arr, future_nyears,
start_year = None, end_year = None, cumsum = False, color = None, region_label = 'Brazil', region_filter = None):
_samples = self._filter_samples(region_filter)
start_year = self.year_list[0] if (start_year is None) else start_year
end_year = self.year_list[len(self.year_list)] if (end_year is None) else end_year
args = []
self._verbose(f'Estimating lulc change area of {lulc_change_label} for {region_label} ({end_year - start_year} years)')
for _year in range(start_year, end_year):
year_samples = _samples[_samples[self.year_col] == _year]
change_mask = self._change_mask(year_samples, lulc_arr, _year, past_arr, past_nyears, future_arr, future_nyears)
change_col = 'lulc_change'
year_samples[change_col] = 0
year_samples[change_col].loc[change_mask] = 1
args.append((year_samples, _year, change_col, [1], region_label))
result = []
for year_result in do_parallel(self._calc_area, args):
result += year_result
self._verbose(f'Finished')
result = pd.DataFrame(result, columns=['lulc_change', 'area_ha', 'proportion', 'se', 'year', 'region'])
result['lulc_change'] = lulc_change_label
result['year'] += 1
if color is not None:
result['color'] = color
if cumsum:
result['area_ha'] = result['area_ha'].cumsum()
result['proportion'] = result['proportion'].cumsum()
result['se'] = result['se'].cumsum()
return result
def lulc_change_by_region(self, region_col, lulc_change_label, lulc_arr, past_arr, past_nyears, future_arr, future_nyears,
start_year = None, end_year = None, color = None):
result = []
for region in self._unique_vals(region_col):
result.append(
self.lulc_change(lulc_change_label, lulc_arr, past_arr, past_nyears, future_arr, future_nyears,
start_year = start_year, end_year = end_year, region_label=region,
region_filter=(self.samples[region_col] == region), color = color)
)
return pd.concat(result)
def stable_area(self, lulc = None, region_label = 'Brazil', region_filter = None, return_all_years=False):
args = []
_samples = self._filter_samples(region_filter)
_lulc_list = self.lulc_list if (lulc is None) else [lulc]
offset = math.floor((self.min_year - self.max_year)/2)
mid_year = (self.max_year + offset)
nyears = (self.max_year - self.min_year) + 1
args = []
self._verbose(f'Estimating stable area of {len(_lulc_list)} LULC classes for {region_label} ({nyears} years)')
for _lulc in _lulc_list:
args.append((
_lulc, #lulc_change_label
[_lulc], #lulc_arr
[_lulc], #past_arr
nyears, #past_nyears
[_lulc], #future_arr
nyears, #future_nyears
mid_year, #start_year
mid_year + 1, #end_year
False, #cumsum
None, #color
region_label, #region_label
region_filter #region_filter
))
result = []
for lulc_result in do_parallel(self.lulc_change, args, backend='threading'):
result.append(lulc_result)
self._verbose(f'Finished')
result = | pd.concat(result, axis=0) | pandas.concat |
import pandas as pd, DataBase, DataBase_wheels
def equation(name,domains,conditions,LHS,RHS):
return f"""{name}{domains}{'$('+conditions+')' if conditions != '' else conditions}.. {LHS} =E= {RHS};"""
def df(x,kwargs):
"""
Modify x using keyword arguments (dicts,kwarg).
"""
return x if x not in kwargs else kwargs[x]
def create_alias_dict(aliases,list_of_tuples_indices=[]):
return {aliases[i[0]]: aliases[i[1]] for i in list_of_tuples_indices}
def ign_KeyError(dict_,key):
try:
return dict_[key]
except KeyError:
return None
class CES:
""" collection of price indices / demand systems for ces nests """
def __init__(self,version='std',**kwargs):
""" Add version of the model """
self.version = version
def add_symbols(self,db,ns_local,ns_global={},dynamic=False):
""" add gpy_symbols with writing methods. ns is a namespace to update symbol names if they are nonstandard """
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','sigma','n'):
setattr(self,sym,db[df(sym,ns_global)])
if dynamic is True:
for sym in ('txE','t0','tE','tx0E'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree,dynamic=False):
""" add gpy_symbols with writing methods. ns_tree is a namespace for relevant subsets to condition the equations on."""
self.conditions = {'zp_out': db[ns_tree['tree_out']].write(),'zp_nout': db[ns_tree['kno_no']].write(), 'q_out': db[ns_tree['bra_o']].write(), 'q_nout': db[ns_tree['bra_no']].write()}
if dynamic is True:
self.conditions = {key: value+' and '+self.txE.write() for key,value in self.conditions.items()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
nn,mu,sigma2 = self.a('n',[(0,1)]),self.a('mu'),self.a('sigma',[(0,1)])
map_,map_2 = self.a('map_'),self.a('map_',[(0,1),(1,0)])
PwT, PwT2 = self.a('PwT'),self.a('PwT',[(0,1)])
PbT,PbT2 = self.a('PbT'),self.a('PbT',[(0,1)])
qD,qD2 = self.a('qD'), self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'), self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_out_{name}",conditions['zp_out'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=True)+'\n\t'
text += self.zero_profit(f"E_zp_nout_{name}",conditions['zp_nout'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=False)+'\n\t'
text += self.demand(f"E_q_out_{name}",conditions['q_out'],nn,map_,mu,PwT,PwT2,PbT2,qD,qD2,qS2,sigma2,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}",conditions['q_nout'],nn,map_,mu,PwT,PwT2,PbT2,qD,qD2,qS2,sigma2,output=False)
return text
def demand(self,name,conditions,nn,map_,mu,PwT,PwT2,PbT2,qD,qD2,qS2,sigma2,output=False):
""" ces demand """
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * ({PwT2}/{PwT})**({sigma2}) * {qD2})"""
else:
RHS = f"""sum({nn}$({map_}), {mu} * ({PbT2}/{PwT})**({sigma2}) * {qS2})"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
def zero_profit(self,name,conditions,nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=False):
""" zero profits condition """
RHS = f"""sum({nn}$({map_2}), {qD2}*{PwT2})"""
if output is True:
return equation(name,self.PbT.doms(),conditions,f"{PbT}*{qS}",RHS)
else:
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
class CES_norm:
""" collection of price indices / demand systems for ces nests """
def __init__(self,version='std',**kwargs):
""" Add version of the model """
self.version = version
def add_symbols(self,db,ns_local,ns_global={},dynamic=False):
""" add gpy_symbols with writing methods. ns is a namespace to update symbol names if they are nonstandard """
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','sigma','n'):
setattr(self,sym,db[df(sym,ns_global)])
if dynamic is True:
for sym in ('txE','t0','tE','tx0E'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree,dynamic=False):
""" add gpy_symbols with writing methods. ns_tree is a namespace for relevant subsets to condition the equations on."""
self.conditions = {'zp_out': db[ns_tree['tree_out']].write(),'zp_nout': db[ns_tree['kno_no']].write(), 'q_out': db[ns_tree['bra_o']].write(), 'q_nout': db[ns_tree['bra_no']].write()}
if dynamic is True:
self.conditions = {key: value+' and '+self.txE.write() for key,value in self.conditions.items()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
nn,nnn = self.a('n',[(0,1)]), self.a('n',[(0,2)])
mu,mu3 = self.a('mu'), self.a('mu',[(0,2)])
sigma2 = self.a('sigma',[(0,1)])
map_,map_2,map_3 = self.a('map_'),self.a('map_',[(0,1),(1,0)]), self.a('map_',[(0,2)])
PwT,PwT2,PwT3 = self.a('PwT'), self.a('PwT',[(0,1)]), self.a('PwT',[(0,2)])
PbT,PbT2 = self.a('PbT'),self.a('PbT',[(0,1)])
qD,qD2 = self.a('qD'),self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'),self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_out_{name}",conditions['zp_out'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=True)+'\n\t'
text += self.zero_profit(f"E_zp_nout_{name}",conditions['zp_nout'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=False)+'\n\t'
text += self.demand(f"E_q_out_{name}", conditions['q_out'],nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}", conditions['q_nout'],nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=False)
return text
def demand(self,name,conditions,nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=False):
""" ces demand """
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * ({PwT2}/{PwT})**({sigma2}) * {qD2} / sum({nnn}$({map_3}), {mu3} * ({PwT2}/{PwT3})**({sigma2})))"""
else:
RHS = f"""sum({nn}$({map_}), {mu} * ({PbT2}/{PwT})**({sigma2}) * {qS2} / sum({nnn}$({map_3}), {mu3} * ({PbT2}/{PwT3})**({sigma2})))"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
def zero_profit(self,name,conditions,nn,map_,qD,qD2,qS,PbT,PwT,PwT2,output=False):
""" zero profits condition """
RHS = f"""sum({nn}$({map_}), {qD2}*{PwT2})"""
if output is True:
return equation(name,self.PbT.doms(),conditions,f"{PbT}*{qS}",RHS)
else:
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
class CET:
""" collection of equations for CET nests """
def __init__(self,version='std',**kwargs):
self.version = version
def add_symbols(self,db,ns_local,ns_global={},**kwargs):
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','eta','n','out'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree):
self.conditions = {'zp': db[ns_tree['knots']].write(), 'q_out': db[ns_tree['bra_o']].write(),'q_nout': db[ns_tree['bra_no']].write()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
nn,mu,eta2,out2 = self.a('n',[(0,1)]),self.a('mu'),self.a('eta',[(0,1)]),self.a('out',[(0,1)])
map_,map_2 = self.a('map_'),self.a('map_',[(0,1),(1,0)])
PwT, PwT2 = self.a('PwT'),self.a('PwT',[(0,1)])
PbT,PbT2 = self.a('PbT'),self.a('PbT',[(0,1)])
qD,qD2 = self.a('qD'), self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'), self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_{name}",conditions['zp'],nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2)+'\n\t'
text += self.demand(f"E_q_out_{name}",conditions['q_out'],nn,map_,mu,PwT,PwT2,PbT,qD,qD2,qS,eta2,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}",conditions['q_nout'],nn,map_,mu,PwT,PwT2,PbT,qD,qD2,qS,eta2,output=False)
return text
def zero_profit(self,name,conditions,nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2):
RHS = f"""sum({nn}$({map_2} and {out2}), {qS2}*{PbT2})+sum({nn}$({map_2} and not {out2}), {qD2}*{PwT2})"""
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
def demand(self,name,conditions,nn,map_,mu,PwT,PwT2,PbT,qD,qD2,qS,eta2,output=False):
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * ({PwT}/{PwT2})**(-{eta2}) * {qD2})"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
else:
RHS = f"""sum({nn}$({map_}), {mu} * ({PbT}/{PwT2})**(-{eta2}) * {qD2})"""
return equation(name,self.qS.doms(),conditions,qS,RHS)
class CET_norm:
""" collection of price indices / demand systems for CET nests """
def __init__(self,version='std',**kwargs):
""" Add version of the model """
self.version = version
def add_symbols(self,db,ns_local,ns_global={},**kwargs):
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','eta','n','out'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree):
self.conditions = {'zp': db[ns_tree['knots']].write(), 'q_out': db[ns_tree['bra_o']].write(),'q_nout': db[ns_tree['bra_no']].write()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
out2,out3 = self.a('out',[(0,1)]), self.a('out',[(0,2)])
nn,nnn = self.a('n',[(0,1)]), self.a('n',[(0,2)])
mu,mu3 = self.a('mu'), self.a('mu',[(0,2)])
eta2 = self.a('eta',[(0,1)])
map_,map_2,map_3 = self.a('map_'),self.a('map_',[(0,1), (1,0)]), self.a('map_',[(0,2)])
PwT,PwT2,PwT3 = self.a('PwT'), self.a('PwT',[(0,1)]), self.a('PwT',[(0,2)])
PbT,PbT2,PbT3 = self.a('PbT'),self.a('PbT',[(0,1)]),self.a('PbT',[(0,2)])
qD,qD2 = self.a('qD'),self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'),self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_{name}",conditions['zp'],nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2)+'\n\t'
text += self.demand(f"E_q_out_{name}",conditions['q_out'],nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}",conditions['q_nout'],nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=False)
return text
def demand(self,name,conditions,nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=False):
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * ({PwT}/{PwT2})**(-{eta2}) * {qD2}/(sum({nnn}$({map_3} and {out3}), {mu3}*({PbT3}/{PwT2})**(-{eta2}))+sum({nnn}$({map_3} and not {out3}), {mu3}*({PwT3}/{PwT2})**(-{eta2}))))"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
else:
RHS = f"""sum({nn}$({map_}), {mu} * ({PbT}/{PwT2})**(-{eta2}) * {qD2}/(sum({nnn}$({map_3} and {out3}), {mu3}*({PbT3}/{PwT2})**(-{eta2}))+sum({nnn}$({map_3} and not {out3}), {mu3}*({PwT3}/{PwT2})**(-{eta2}))))"""
return equation(name,self.qS.doms(),conditions,qS,RHS)
def zero_profit(self,name,conditions,nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2):
RHS = f"""sum({nn}$({map_2} and {out2}), {qS2}*{PbT2})+sum({nn}$({map_2} and not {out2}), {qD2}*{PwT2})"""
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
class MNL:
""" collection of price indices / demand systems for MNL nests """
def __init__(self,version='std',**kwargs):
""" Add version of the model """
self.version = version
def add_symbols(self,db,ns_local,ns_global={}):
""" add gpy_symbols with writing methods. ns is a namespace to update symbol names if they are nonstandard """
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','sigma','n'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree):
""" add gpy_symbols with writing methods. ns_tree is a namespace for relevant subsets to condition the equations on."""
self.conditions = {'zp_out': db[ns_tree['tree_out']].write(),'zp_nout': db[ns_tree['kno_no']].write(), 'q_out': db[ns_tree['bra_o']].write(), 'q_nout': db[ns_tree['bra_no']].write()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
nn,nnn = self.a('n',[(0,1)]), self.a('n',[(0,2)])
mu,mu3 = self.a('mu'), self.a('mu',[(0,2)])
sigma2 = self.a('sigma',[(0,1)])
map_,map_2,map_3 = self.a('map_'),self.a('map_',[(0,1),(1,0)]), self.a('map_',[(0,2)])
PwT,PwT2,PwT3 = self.a('PwT'), self.a('PwT',[(0,1)]), self.a('PwT',[(0,2)])
PbT,PbT2 = self.a('PbT'),self.a('PbT',[(0,1)])
qD,qD2 = self.a('qD'),self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'),self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_out_{name}",conditions['zp_out'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=True)+'\n\t'
text += self.zero_profit(f"E_zp_nout_{name}",conditions['zp_nout'],nn,map_2,qD,qD2,qS,PbT,PwT,PwT2,output=False)+'\n\t'
text += self.demand(f"E_q_out_{name}", conditions['q_out'],nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}", conditions['q_nout'],nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=False)
return text
def zero_profit(self,name,conditions,nn,map_,qD,qD2,qS,PbT,PwT,PwT2,output=False):
""" zero profits condition """
RHS = f"""sum({nn}$({map_}), {qD2}*{PwT2})"""
if output is True:
return equation(name,self.PbT.doms(),conditions,f"{PbT}*{qS}",RHS)
else:
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
def demand(self,name,conditions,nn,nnn,map_,map_3,mu,mu3,sigma2,PwT,PwT2,PwT3,PbT2,qD,qD2,qS2,output=False):
""" MNL demand """
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * exp(({PwT2}-{PwT})*{sigma2}) * {qD2}/ sum({nnn}$({map_3}), {mu3}*exp(({PwT2}-{PwT3})*{sigma2})))"""
else:
RHS = f"""sum({nn}$({map_}), {mu} * exp(({PbT2}-{PwT})*{sigma2}) * {qS2}/ sum({nnn}$({map_3}), {mu3}*exp(({PbT2}-{PwT3})*{sigma2})))"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
class MNL_out:
""" collection of price indices / demand systems for CET nests """
def __init__(self,version='std',**kwargs):
""" Add version of the model """
self.version = version
def add_symbols(self,db,ns_local,ns_global={},**kwargs):
for sym in ['map_']:
setattr(self,sym,db[ns_local[sym]])
for sym in ('PbT','PwT','qD','qS','mu','eta','n','out'):
setattr(self,sym,db[df(sym,ns_global)])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns_tree):
self.conditions = {'zp': db[ns_tree['knots']].write(), 'q_out': db[ns_tree['bra_o']].write(),'q_nout': db[ns_tree['bra_no']].write()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name,conditions=None):
conditions = self.conditions if conditions is None else conditions
nn,nnn = self.a('n',[(0,1)]), self.a('n',[(0,2)])
mu,mu3 = self.a('mu'), self.a('mu',[(0,2)])
eta2,out2,out3 = self.a('eta',[(0,1)]),self.a('out',[(0,1)]),self.a('out',[(0,2)])
map_,map_2,map_3 = self.a('map_'),self.a('map_',[(0,1),(1,0)]), self.a('map_',[(0,2)])
PwT,PwT2,PwT3 = self.a('PwT'), self.a('PwT',[(0,1)]), self.a('PwT',[(0,2)])
PbT,PbT2,PbT3 = self.a('PbT'),self.a('PbT',[(0,1)]),self.a('PbT',[(0,2)])
qD,qD2 = self.a('qD'),self.a('qD',[(0,1)])
qS,qS2 = self.a('qS'),self.a('qS',[(0,1)])
text = self.zero_profit(f"E_zp_{name}",conditions['zp'],nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2)+'\n\t'
text += self.demand(f"E_q_out_{name}",conditions['q_out'],nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=True)+'\n\t'
text += self.demand(f"E_q_nout_{name}",conditions['q_nout'],nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=False)
return text
def demand(self,name,conditions,nn,nnn,map_,map_3,mu,mu3,out3,eta2,qD,qD2,qS,PwT,PwT2,PwT3,PbT,PbT3,output=False):
if output is False:
RHS = f"""sum({nn}$({map_}), {mu} * exp(({PwT}-{PwT2})*(-{eta2}))*{qD2}/(sum({nnn}$({map_3} and {out3}), {mu3}*exp(({PbT3}-{PwT2})/(-{eta2})))+sum({nnn}$({map_3} and not {out3}), {mu3}*exp(({PwT3}-{PwT2})*(-{eta2})))))"""
return equation(name,self.qD.doms(),conditions,qD,RHS)
else:
RHS = f"""sum({nn}$({map_}), {mu} * exp(({PbT}-{PwT2})*(-{eta2}))*{qD2}/(sum({nnn}$({map_3} and {out3}), {mu3}*exp(({PbT3}/{PwT2})/(-{eta2})))+sum({nnn}$({map_3} and not {out3}), {mu3}*exp(({PwT3}-{PwT2})*(-{eta2})))))"""
return equation(name,self.qS.doms(),conditions,qS,RHS)
def zero_profit(self,name,conditions,nn,map_2,out2,qD,qD2,qS2,PbT2,PwT,PwT2):
RHS = f"""sum({nn}$({map_2} and {out2}), {qS2}*{PbT2})+sum({nn}$({map_2} and not {out2}), {qD2}*{PwT2})"""
return equation(name,self.PwT.doms(),conditions,f"{PwT}*{qD}",RHS)
class simplesum:
""" Collection of equations that define a variable as the simple sum of others """
def __init__(self):
pass
def add_symbols(self, db, ns):
[setattr(self,sym,db[ns[sym]]) for sym in ('n', 'sumUaggs', 'sumU2U', 'qsumU', 'sumXaggs', 'sumX2X', 'qsumX', 'qD')]
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self):
self.conditions = {'sumUaggs': self.sumUaggs.write(), 'sumXaggs': self.sumXaggs.write()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self):
nn = self.a("n", [(0, 1)])
sumUaggs, sumXaggs = "sumUaggs", "sumXaggs"
sumU2U, sumX2X = self.a("sumU2U"), self.a("sumX2X")
qsumU, qsumX = self.a("qsumU"), self.a("qsumX")
qD2 = self.a("qD", [(0, 1)])
name_sumU = "E_sumU"
name_sumX = "E_sumX"
text = self.simplesum(name_sumU, self.conditions["sumUaggs"], sumUaggs, sumU2U, qsumU, qD2, nn) + '\n\t'
text += self.simplesum(name_sumX, self.conditions["sumXaggs"], sumXaggs, sumX2X, qsumX, qD2, nn)
return text
def simplesum(self, name, conditions, agg, agg2ind, qagg, qD2, nn):
LHS = f"{qagg}"
RHS = f"sum({nn}$({agg2ind}), {qD2})"
return equation(name, getattr(self, agg).doms(), conditions, LHS, RHS)
class pricewedge:
def __init__(self,**kwargs):
pass
def add_symbols(self,db,ns,dynamic=False):
[setattr(self,sym,db[ns[sym]]) for sym in ('n','markup','tauS','tauLump','Peq','PbT','qS','out')];
if dynamic is True:
[setattr(self,sym,db[ns[sym]]) for sym in ('t','txE','ic') if sym in ns];
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,dynamic=False):
self.conditions = {'pw': self.out.write()}
if dynamic is True:
self.conditions = {key: value+' and '+self.txE.write() for key,value in self.conditions.items()}
def a(self,attr,lot_indices=[],l='',lag={}):
""" get the version of the symbol self.attr with alias from list of tuples with indices (lot_indices) and potentially .l added."""
return getattr(self,attr).write(alias=create_alias_dict(self.aliases,lot_indices),l=l,lag=lag)
def run(self,name):
return self.pricewedge(f"E_pw_{name}",self.conditions['pw'],self.a('Peq'),self.a('PbT'),self.a('PbT',[(0,1)]),self.a('qS',[(0,1)]),self.a('markup'),self.a('tauS'),self.a('tauLump'),self.a('n',[(0,1)]),self.a('out',[(0,1)]))
def pricewedge(self,name,conditions,Peq,PbT,PbT2,qS2,markup,tauS,tauLump,nn,out2):
RHS = f"""(1+{markup})*({PbT}*(1+{tauLump}/sum({nn}$({out2}), {qS2}*{PbT2}))+{tauS}+{0 if not hasattr(self,'ic') else self.ic.write()})"""
return equation(name,self.PbT.doms(),conditions,Peq,RHS)
class ict_v1:
""" Installation costs """
def __init__(self,s=False,**kwargs):
self.ns = self.namespace(kwargs)
self.sector = s
@staticmethod
def namespace(self,**kwargs):
return {key: df(key,kwargs) for key in ('ic','ic_1','ic_2','ic_tvc','os')}
def add_symbols(self,db,ns):
[setattr(self,sym,db[ns[sym]]) for sym in ('n','t','txE','tx0','t0','tE','dur','dur2inv','PwT','qD','Rrate','rDepr','R_LR','g_LR','infl_LR','qS','PbT','out')];
if self.sector is not False:
self.ss = db[self.sector]
for sym in self.ns:
db[self.ns[sym]] = self.default_var_series(sym)
setattr(self,sym,db[self.ns[sym]])
self.aliases = {i: db.alias_dict0[self.n.name][i] for i in range(len(db.alias_dict0[self.n.name]))}
def add_conditions(self,db,ns):
self.conditions = {'lom': db[ns['txE']].write(), 'pk': db[ns['tx0E']].write(),'Ktvc': db[ns['tE']].write()}
self.conditions = {key: value +' and '+db[ns['dur']].write() for key,value in self.conditions.items()}
[self.conditions.__setitem__(k,f"{db[ns['out']].write()} and {db[ns['txE']].write()}") for k in ('os','instcost')];
if self.sector is not False:
self.conditions = {key: value +' and '+self.ss.write() for key,value in self.conditions.items()}
def default_var_series(self,var):
if var == 'ic':
return DataBase.gpy_symbol(pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.out.vals,self.txE.vals),name=self.ns[var]),**{'text':'sum of installation costs in sector s, scaled by value of outputs, per output'})
elif var == 'os':
return pd.Series(0.5, index = DataBase_wheels.prepend_index_with_1dindex(self.out.vals,self.txE.vals),name=self.ns[var])
elif var =='ic_1' and self.sector is False:
return pd.Series(0.1,index = self.dur.vals, name=self.ns[var])
elif var =='ic_1' and self.sector is not False:
return pd.Series(0.1, index = | pd.MultiIndex.from_product([self.ss.vals, self.dur.vals]) | pandas.MultiIndex.from_product |
# A code snippet to demostrate the use of
# pandas python library
import matplotlib.pyplot as plt
import pandas as pd
# The data is stored in the pandas DataFrame
reviews = pd.read_csv('data\ign.csv')
# head() prints the initial portion of the data
print(reviews.head())
# tail displays the last portion of the data
print(reviews.tail())
# shape returns the size of the DataFrame
print(reviews.shape)
# Pandas vs just using NumPy is that Pandas allows you
# to have columns with different data types
# indexing with pandas
# this can be done with iloc and loc
print((reviews.iloc[0:10, :]))
# removing the index column as it is not required anymore
reviews = reviews.iloc[:, 1:]
print(reviews.head())
# indexing using labels in pandas using loc[]
print(reviews.loc[:, 'score'])
# multiple columns can be passed using a list
print(reviews.loc[:, ['score', 'release_year']])
# retrieving columns using Pandas Series Object
print(reviews['score'])
print(reviews[['score', 'release_year']])
# DataFrame stores tabular data, but a Series stores a single column or row of data
# Creating a series object manually
fruit_series = pd.Series(['watermelon', 'lychee', 'strawberry'])
dessert_seies = pd.Series(['watermelon mochi', 'lychee icecream', 'strawberry shortcake'])
print(fruit_series, dessert_seies)
print('\n')
dataframe = pd.DataFrame([fruit_series, dessert_seies])
print(dataframe)
print('\n')
# another way to create a DataFrame in pandas
# column and row names can also be specified using the index and columns argument respectively
my_dataframe = | pd.DataFrame([[1, 2, 3], ['watermelon', 'mango', 'lemon']], index=['numbers', 'fruits'], columns=['column1', 'column2', 'column3']) | pandas.DataFrame |
from collections import defaultdict
import scipy.integrate as integrate
import scipy.special as special
import numpy as np
import pandas as pd
import math
import re
import random
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from wordcloud import WordCloud
import functools
import operator
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from langdetect import detect
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
from tqdm import tqdm
from matplotlib import pyplot as plt
import pickle
from PIL import Image
########## EXERCISE 1 ##########
# Our hash function
def hash_function(string_):
n = 2**32 + 15
result = 0
for char in string_:
result = result * 31 + ord(char)
result = format(result % n, '032b')
return result
# Create buckets
def create_registers():
return defaultdict(lambda :-1)
# Update buckets
def update_register(string_, registers):
b = 12
x = hash_function(string_)
j = int(str(x)[:b],2)
if '1' in set(x[b:]):
rho_w = (x[b:]).index('1')+1
else:
rho_w = len(x[b:])
registers[j] = max(registers[j],rho_w)
# process each row and pass to the register
def process_data(registers):
with open('hash.txt') as f:
while True:
line = f.readline()
if not line:
break
update_register(line.strip(), registers)
# estimate the cardinality
def hyperLogLog(registers):
b = 12
m = 2**b
alpha = (m)*(integrate.quad(lambda u: (math.log((2+u)/(1+u),2))**(m),0,np.infty )[0])
Z =(sum(2**-registers[j] for j in registers.keys()))**(-1)
E = (alpha)**(-1)*(m**2)*Z
return E
# the error of our filter
def error_rate(registers_count):
return 1.3 / math.sqrt(2**registers_count)
########## EXERCISE 2 ##########
# group by product id and concatenate text fields
def groupby_productid_df(df):
productid_df = | pd.DataFrame() | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin.testing import TestPluginBase
from q2_diversity_lib import (faith_pd, pielou_evenness, observed_features,
shannon_entropy)
import io
import biom
import skbio
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import copy
nonphylogenetic_measures = [observed_features, pielou_evenness,
shannon_entropy]
class SmokeTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.empty_table = biom.Table(np.array([]), [], [])
def test_non_phylogenetic_passed_empty_table(self):
for measure in nonphylogenetic_measures:
with self.assertRaisesRegex(ValueError, "empty"):
measure(table=self.empty_table)
class FaithPDTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.input_table = biom.Table(np.array([[1, 0, .5, 999, 1],
[0, 1, 2, 0, 1],
[0, 0, 0, 1, 1]]),
['A', 'B', 'C'],
['S1', 'S2', 'S3', 'S4', 'S5'])
self.input_tree = skbio.TreeNode.read(io.StringIO(
'((A:0.3, B:0.50):0.2, C:100)root;'))
self.faith_pd_expected = pd.Series({'S1': 0.5, 'S2': 0.7, 'S3': 1.0,
'S4': 100.5, 'S5': 101},
name='faith_pd')
def test_receives_empty_table(self):
empty_table = biom.Table(np.array([]), [], [])
with self.assertRaisesRegex(ValueError, "empty"):
faith_pd(table=empty_table, phylogeny=self.input_tree)
def test_method(self):
actual = faith_pd(table=self.input_table, phylogeny=self.input_tree)
| pdt.assert_series_equal(actual, self.faith_pd_expected) | pandas.util.testing.assert_series_equal |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas
import os
import seaborn as sns
from argparse import ArgumentParser
from models.utils.continual_model import ContinualModel
from datasets.utils.continual_dataset import ContinualDataset
from typing import Tuple
from utils.conf import base_path
from datasets import NAMES as DATASET_NAMES
class MainVisual:
def __init__(self):
self.markers = ['*-', '*:', '^-', '^:', 'o-', 'o:', 'v-', 'v:', 'x-', 'x:',
'o--', '*--', 'v--', '^--']
self.ptms = ["albert", "bert", "gpt2", "roberta", "xlnet"]
self.datasets = ["seq-clinc150", "seq-maven", "seq-webred"]
self.settings = ["class", "task"]
self.prob_types = ["proto", "final"]
self.methods = ["vanilla", "ewc", "hat", "er", "derpp", "joint"]
self.num_tasks = {"clinc150": 15, "maven": 16, "webred": 24}
self.bsize = [200, 500, 1000]
self.time = ["time"]
self.ft = ["forward_transfer"]
self.bt = ["backward_transfer"]
self.fgt = ["forgetting"]
self.selection = {
"method": self.methods,
"ptm": self.ptms,
"time": self.time,
"ft": self.ft,
"bt": self.bt,
"fgt": self.fgt
}
def visualize(self, xs, tags, results, x_label, y_label, out_file, title):
for i, value in enumerate(results):
plt.plot(xs, value, self.markers[i], label=tags[i])
plt.legend()
plt.xlabel(x_label, fontsize=10)
plt.ylabel(y_label, fontsize=10)
plt.title(title.split(".")[0])
plt.savefig(out_file)
plt.clf()
pass
def visualize_grouped_bar(self, x_label, y_label, hue, title, data, file_path):
sns.set_theme(style="whitegrid")
# Draw a nested barplot by species and sex
g = sns.catplot(
data=data, kind="bar",
x=x_label, y=y_label, hue=hue,
ci="sd", palette="viridis", alpha=.6, height=6
)
sns.set(rc={"figure.dpi": 300, 'savefig.dpi': 300})
g.despine(left=True)
# plt.xlabel(x_label, fontsize=15)
plt.ylabel(y_label, fontsize=15)
g.legend.set_title(hue)
# plt.title(title)
if y_label == "accuracy":
plt.ylim(0, 105)
g.savefig(file_path)
plt.clf()
def merg_data(self, datasets, setting=None, merge_all=False):
clumns = ["PLM", "Method", "forward transfer", "backward transfer", "forgetting", "time", "dataset", "task"]
all_df = None
if not merge_all:
for ds in datasets:
file_name = "{dataset}_{setting}".format(dataset=ds, setting=setting)
in_file = "./data/detail_result/{file_name}.csv".format(file_name=file_name)
last_task = "task{id}".format(id=self.num_tasks[ds])
clumns[-1] = last_task
df = pandas.read_csv(in_file)
sub_df = pandas.DataFrame(df[clumns])
sub_df = sub_df.rename(columns={last_task: "mean accuracy"})
length = len(sub_df)
sub_df["setting"] = [setting] * length
if all_df is None:
all_df = sub_df
else:
all_df = pandas.concat([all_df, sub_df])
return all_df
else:
for ds in datasets:
for set_ in self.settings:
file_name = "{dataset}_{setting}".format(dataset=ds, setting=set_)
in_file = "./data/detail_result/{file_name}.csv".format(file_name=file_name)
last_task = "task{id}".format(id=self.num_tasks[ds])
clumns[-1] = last_task
df = pandas.read_csv(in_file)
sub_df = | pandas.DataFrame(df[clumns]) | pandas.DataFrame |
import numpy as np
import pandas
from sklearn.metrics import mean_squared_error
import math
from math import sqrt
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.datasets import load_iris
from sklearn import preprocessing
ree = 0
getter1 = pandas.read_csv('testing_nfl.csv')
getter2 = getter1.values
train = np.zeros([1, getter2.shape[1]])
for i in range(len(getter2)):
try:
if not getter2[i, 3] == '-' and not math.isnan(float(getter2[i, 3])):
train = np.vstack((train, getter2[i, :]))
except:
pass
X_trn_raw, y_trn = train[1:,3:19], train[1:,21]
y_trn = np.array([float(item) for item in y_trn])
y_trn = y_trn.astype(np.float32)
X_trn_raw = np.array([[float(elm) for elm in row] for row in X_trn_raw])
X_trn_raw = X_trn_raw.astype(np.float32)
for i in range(len(X_trn_raw)):
X_trn_raw[i, 1] = X_trn_raw[i, 0] - X_trn_raw[i, 1]
X_trn_raw[i, 9] = X_trn_raw[i, 8] - X_trn_raw[i, 9]
pandas.DataFrame(X_trn_raw).to_csv("raw.csv")
inputs = X_trn_raw - np.mean(X_trn_raw,axis=0) #shift
inputs = inputs/(np.max(inputs,axis=0)) #normalize
inputs = np.concatenate((inputs, np.ones((X_trn_raw.shape[0],1))), axis = 1) #add bias
inputs_new = np.concatenate((X_trn_raw[:,0:8],np.square(X_trn_raw[:,0:8])), axis=1) #add square term
inputs_new = inputs_new - np.mean(inputs_new,axis=0) #shift
inputs_new = inputs_new/(np.max(inputs_new,axis=0)) #normalize
inputs_new_bruteforce = np.zeros(inputs_new.shape)
while ree < len(X_trn_raw):
inputs_new_bruteforce[ree, :] = inputs_new[ree+1, :]
inputs_new_bruteforce[ree+1, :] = inputs_new[ree, :]
inputs[ree, 8:16] = inputs[ree+1, 0:8]
inputs[ree+1, 8:16] = inputs[ree, 0:8]
ree += 2
| pandas.DataFrame(inputs_new) | pandas.DataFrame |
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MiniBatchKMeans,DBSCAN
from sklearn.preprocessing import normalize
import pandas as pd
import numpy as np
from DBSCAN import MyDBSCAN
dataset_path = "../data/dataset_TIST2015/"
def Read_Data(filename,limit):
chunksize = 10 ** 6
count = 0
data_df = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
from tqdm import tqdm
from nltk.corpus import stopwords
from gensim.corpora import Dictionary
from gensim.matutils import corpus2dense
from gensim.models.wrappers import LdaMallet
from gensim import utils
from gensim.models import Phrases
from gensim.models.phrases import Phraser
from src.python.utils import tokenize
def prep_text_lda(docs, vocab_size=20000):
""" docs: (pd.Series str) cleaned text """
english_stopwords = set([s.replace("\'", "") for s in stopwords.words("english")])
tqdm.pandas(desc="Tokenizing")
tokenized_docs = docs.progress_apply(lambda x: [w.lower() for w in tokenize(x)])
bigram = Phrases(tokenized_docs.values.tolist())
phraser = Phraser(bigram)
tqdm.pandas(desc="Bigrams")
bigrammed_docs = tokenized_docs.progress_apply(lambda tokens_: phraser[tokens_])
id2word = Dictionary(bigrammed_docs.values.tolist())
id2word.filter_extremes(keep_n=vocab_size, no_above=0.5)
id2word.filter_tokens(bad_ids=[id2word.token2id[a] for a in english_stopwords if a in id2word.token2id])
id2word.compactify()
tqdm.pandas(desc="Cleaning")
tokenized = bigrammed_docs.progress_apply(lambda doc_tokens: " ".join([w for w in doc_tokens if w in id2word.token2id]))
reconst_docs = tokenized.apply(lambda x: x.split())
return id2word, reconst_docs
def fit_lda(prefix, tokenized_docs, id2word,
mallet_path=os.environ["MALLET_PATH"],
num_topics=500, iterations=500):
if not os.path.isdir(prefix):
os.makedirs(prefix)
if os.path.exists(os.path.join(prefix, "saved_model.pkl")):
return utils.SaveLoad.load(os.path.join(prefix, "saved_model.pkl"))
elif tokenized_docs is None:
raise ValueError("LDA model not found at {}/{}".format(prefixed, "saved_model.pkl"))
if mallet_path is None or mallet_path == "":
raise ValueError("No mallet path specified")
corpus = [id2word.doc2bow(tokens) for tokens in tokenized_docs.values.tolist()]
lda_model = LdaMallet(mallet_path=mallet_path,
prefix=prefix,
corpus=corpus,
id2word=id2word,
iterations=iterations,
workers=4,
num_topics=num_topics,
optimize_interval=20)
lda_model.save(os.path.join(prefix, "saved_model.pkl"))
id2word.save_as_text(os.path.join(prefix, "id2word"))
# save clean lda weights for later analysis
W = lda_model.get_topics()
W = | pd.DataFrame(W) | pandas.DataFrame |
import re
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork import DataColumn, DataTable
from woodwork.datatable import _check_unique_column_names
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
LogicalType,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
Timedelta,
ZIPCode
)
from woodwork.tests.testing_utils import (
check_column_order,
mi_between_cols,
to_pandas,
validate_subset_dt
)
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
dask_delayed = import_or_none('dask.delayed')
ks = import_or_none('databricks.koalas')
def test_datatable_df_property(sample_df):
dt = DataTable(sample_df)
assert dt.df is sample_df
pd.testing.assert_frame_equal(to_pandas(dt.df), to_pandas(sample_df))
def test_datatable_with_numeric_datetime_time_index(time_index_df):
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': Datetime})
error_msg = 'Time index column must contain datetime or numeric values'
with pytest.raises(TypeError, match=error_msg):
DataTable(time_index_df, name='datatable', time_index='strs', logical_types={'strs': Datetime})
assert dt.time_index == 'ints'
assert dt.to_dataframe()['ints'].dtype == 'datetime64[ns]'
def test_datatable_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
dt = DataTable(time_index_df, time_index='ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Integer
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Specify logical type for time index on init
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': 'Double'})
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Change time index to normal datetime time index
dt = dt.set_time_index('times')
date_col = dt['ints']
assert dt.time_index == 'times'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'numeric'}
# Set numeric time index after init
dt = DataTable(time_index_df, logical_types={'ints': 'Double'})
dt = dt.set_time_index('ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
def test_datatable_adds_standard_semantic_tags(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={
'id': Categorical,
'age': Integer,
})
assert dt.semantic_tags['id'] == {'category'}
assert dt.semantic_tags['age'] == {'numeric'}
def test_check_unique_column_names(sample_df):
if ks and isinstance(sample_df, ks.DataFrame):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if dd and isinstance(sample_df, dd.DataFrame):
duplicate_cols_df = dd.concat([duplicate_cols_df, duplicate_cols_df['age']], axis=1)
else:
duplicate_cols_df.insert(0, 'age', [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(IndexError, match='Dataframe cannot contain duplicate columns names'):
_check_unique_column_names(duplicate_cols_df)
def test_datatable_types(sample_df):
new_dates = ["2019~01~01", "2019~01~02", "2019~01~03", "2019~01~04"]
if dd and isinstance(sample_df, dd.DataFrame):
sample_df['formatted_date'] = pd.Series(new_dates)
else:
sample_df['formatted_date'] = new_dates
ymd_format = Datetime(datetime_format='%Y~%m~%d')
dt = DataTable(sample_df, logical_types={'formatted_date': ymd_format})
returned_types = dt.types
assert isinstance(returned_types, pd.DataFrame)
assert 'Physical Type' in returned_types.columns
assert 'Logical Type' in returned_types.columns
assert 'Semantic Tag(s)' in returned_types.columns
assert returned_types.shape[1] == 3
assert len(returned_types.index) == len(sample_df.columns)
assert all([dc.logical_type in ww.type_system.registered_types or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
'formatted_date': ymd_format
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(returned_types['Logical Type'])
for tag in returned_types['Semantic Tag(s)']:
assert isinstance(tag, str)
def test_datatable_typing_info_with_col_names(sample_df):
dt = DataTable(sample_df)
typing_info_df = dt._get_typing_info(include_names_col=True)
assert isinstance(typing_info_df, pd.DataFrame)
assert 'Data Column' in typing_info_df.columns
assert 'Physical Type' in typing_info_df.columns
assert 'Logical Type' in typing_info_df.columns
assert 'Semantic Tag(s)' in typing_info_df.columns
assert typing_info_df.shape[1] == 4
assert typing_info_df.iloc[:, 0].name == 'Data Column'
assert len(typing_info_df.index) == len(sample_df.columns)
assert all([dc.logical_type in LogicalType.__subclasses__() or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(typing_info_df['Logical Type'])
for tag in typing_info_df['Semantic Tag(s)']:
assert isinstance(tag, str)
correct_column_names = pd.Series(list(sample_df.columns),
index=list(sample_df.columns))
assert typing_info_df['Data Column'].equals(correct_column_names)
def test_datatable_head(sample_df):
dt = DataTable(sample_df, index='id', logical_types={'email': 'EmailAddress'}, semantic_tags={'signup_date': 'birthdat'})
head = dt.head()
assert isinstance(head, pd.DataFrame)
assert isinstance(head.columns, pd.MultiIndex)
if dd and isinstance(sample_df, dd.DataFrame):
assert len(head) == 2
else:
assert len(head) == 4
for i in range(len(head.columns)):
name, dtype, logical_type, tags = head.columns[i]
dc = dt[name]
# confirm the order is the same
assert dt._dataframe.columns[i] == name
# confirm the rest of the attributes match up
assert dc.dtype == dtype
assert dc.logical_type == logical_type
assert str(list(dc.semantic_tags)) == tags
shorter_head = dt.head(1)
assert len(shorter_head) == 1
assert head.columns.equals(shorter_head.columns)
def test_datatable_repr(small_df):
dt = DataTable(small_df)
dt_repr = repr(dt)
expected_repr = ' Physical Type Logical Type Semantic Tag(s)\nData Column \nsample_datetime_series datetime64[ns] Datetime []'
assert dt_repr == expected_repr
dt_html_repr = dt._repr_html_()
expected_repr = '<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>Physical Type</th>\n <th>Logical Type</th>\n <th>Semantic Tag(s)</th>\n </tr>\n <tr>\n <th>Data Column</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>sample_datetime_series</th>\n <td>datetime64[ns]</td>\n <td>Datetime</td>\n <td>[]</td>\n </tr>\n </tbody>\n</table>'
assert dt_html_repr == expected_repr
def test_datatable_repr_empty(empty_df):
dt = DataTable(empty_df)
assert repr(dt) == 'Empty DataTable'
assert dt._repr_html_() == 'Empty DataTable'
assert dt.head() == 'Empty DataTable'
def test_set_types_combined(sample_df):
dt = DataTable(sample_df, index='id', time_index='signup_date')
assert dt['signup_date'].semantic_tags == set(['time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Integer
assert dt['is_registered'].semantic_tags == set()
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == NaturalLanguage
assert dt['phone_number'].logical_type == NaturalLanguage
semantic_tags = {
'signup_date': ['test1'],
'age': [],
'is_registered': 'test2'
}
logical_types = {
'email': 'EmailAddress',
'phone_number': PhoneNumber,
'age': 'Double'
}
dt = dt.set_types(logical_types=logical_types, semantic_tags=semantic_tags)
assert dt['signup_date'].semantic_tags == set(['test1', 'time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Double
assert dt['is_registered'].semantic_tags == set(['test2'])
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == EmailAddress
assert dt['phone_number'].logical_type == PhoneNumber
def test_new_dt_from_columns(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
empty_dt = dt._new_dt_from_cols([])
assert len(empty_dt.columns) == 0
just_index = dt._new_dt_from_cols(['id'])
assert just_index.index == dt.index
assert just_index.time_index is None
validate_subset_dt(just_index, dt)
just_time_index = dt._new_dt_from_cols(['signup_date'])
assert just_time_index.time_index == dt.time_index
assert just_time_index.index is None
validate_subset_dt(just_time_index, dt)
transfer_schema = dt._new_dt_from_cols(['phone_number'])
assert transfer_schema.index is None
assert transfer_schema.time_index is None
validate_subset_dt(transfer_schema, dt)
def test_pop(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={'age': Integer},
semantic_tags={'age': 'custom_tag'},
use_standard_tags=True)
datacol = dt.pop('age')
assert isinstance(datacol, DataColumn)
assert 'custom_tag' in datacol.semantic_tags
assert all(to_pandas(datacol.to_series()).values == [33, 25, 33, 57])
assert datacol.logical_type == Integer
assert 'age' not in dt.to_dataframe().columns
assert 'age' not in dt.columns
assert 'age' not in dt.logical_types.keys()
assert 'age' not in dt.semantic_tags.keys()
def test_shape(categorical_df):
dt = ww.DataTable(categorical_df)
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 5)
assert dt_shape == df_shape
dt.pop('ints')
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 4)
assert dt_shape == df_shape
def test_select_invalid_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
err_msg = "Invalid selector used in include: 1 must be either a string or LogicalType"
with pytest.raises(TypeError, match=err_msg):
dt.select(['boolean', 'index', Double, 1])
dt_empty = dt.select([])
assert len(dt_empty.columns) == 0
def test_select_single_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d')
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth'
})
dt_ltype_string = dt.select('full_name')
assert len(dt_ltype_string.columns) == 1
assert 'full_name' in dt_ltype_string.columns
dt_ltype_obj = dt.select(Integer)
assert len(dt_ltype_obj.columns) == 2
assert 'age' in dt_ltype_obj.columns
assert 'id' in dt_ltype_obj.columns
dt_tag_string = dt.select('index')
assert len(dt_tag_string.columns) == 1
assert 'id' in dt_tag_string.columns
dt_tag_instantiated = dt.select('Datetime')
assert len(dt_tag_instantiated.columns) == 1
assert 'signup_date' in dt_tag_instantiated.columns
def test_select_list_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d'),
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth',
'email': 'tag2',
'is_registered': 'category'
})
dt_just_strings = dt.select(['FullName', 'index', 'tag2', 'boolean'])
assert len(dt_just_strings.columns) == 4
assert 'id' in dt_just_strings.columns
assert 'full_name' in dt_just_strings.columns
assert 'email' in dt_just_strings.columns
assert 'is_registered' in dt_just_strings.columns
dt_mixed_selectors = dt.select([FullName, 'index', 'time_index', Integer])
assert len(dt_mixed_selectors.columns) == 4
assert 'id' in dt_mixed_selectors.columns
assert 'full_name' in dt_mixed_selectors.columns
assert 'signup_date' in dt_mixed_selectors.columns
assert 'age' in dt_mixed_selectors.columns
dt_common_tags = dt.select(['category', 'numeric', Boolean, Datetime])
assert len(dt_common_tags.columns) == 3
assert 'is_registered' in dt_common_tags.columns
assert 'age' in dt_common_tags.columns
assert 'signup_date' in dt_common_tags.columns
def test_select_instantiated():
ymd_format = Datetime(datetime_format='%Y~%m~%d')
df = pd.DataFrame({
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd': ["2019~01~01", "2019~01~02", "2019~01~03"],
})
dt = DataTable(df,
logical_types={'ymd': ymd_format,
'dates': Datetime})
dt = dt.select('Datetime')
assert len(dt.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
dt.select(ymd_format)
def test_select_maintain_order(sample_df):
dt = DataTable(sample_df, logical_types={col_name: 'NaturalLanguage' for col_name in sample_df.columns})
new_dt = dt.select('NaturalLanguage')
check_column_order(dt, new_dt)
def test_filter_cols(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
filtered = dt._filter_cols(include='email', col_names=True)
assert filtered == ['email']
filtered_log_type_string = dt._filter_cols(include='NaturalLanguage')
filtered_log_type = dt._filter_cols(include=NaturalLanguage)
assert filtered_log_type == filtered_log_type_string
filtered_semantic_tag = dt._filter_cols(include='numeric')
assert filtered_semantic_tag == ['age']
filtered_multiple = dt._filter_cols(include=['numeric'])
expected = ['phone_number', 'age']
for col in filtered_multiple:
assert col in expected
filtered_multiple_overlap = dt._filter_cols(include=['NaturalLanguage', 'email'], col_names=True)
expected = ['full_name', 'phone_number', 'email']
for col in filtered_multiple_overlap:
assert col in expected
def test_datetime_inference_with_format_param():
df = pd.DataFrame({
'index': [0, 1, 2],
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd_special': ["2019~01~01", "2019~01~02", "2019~01~03"],
'mdy_special': pd.Series(['3~11~2000', '3~12~2000', '3~13~2000'], dtype='string'),
})
dt = DataTable(df,
name='dt_name',
logical_types={'ymd_special': Datetime(datetime_format='%Y~%m~%d'),
'mdy_special': Datetime(datetime_format='%m~%d~%Y'),
'dates': Datetime},
time_index='ymd_special')
assert dt.time_index == 'ymd_special'
assert dt['dates'].logical_type == Datetime
assert isinstance(dt['ymd_special'].logical_type, Datetime)
assert isinstance(dt['mdy_special'].logical_type, Datetime)
dt = dt.set_time_index('mdy_special')
assert dt.time_index == 'mdy_special'
df = pd.DataFrame({
'mdy_special': pd.Series(['3&11&2000', '3&12&2000', '3&13&2000'], dtype='string'),
})
dt = DataTable(df)
dt = dt.set_types(logical_types={'mdy_special': Datetime(datetime_format='%m&%d&%Y')})
dt.time_index = 'mdy_special'
assert isinstance(dt['mdy_special'].logical_type, Datetime)
assert dt.time_index == 'mdy_special'
def test_natural_language_inference_with_config_options():
dataframe = pd.DataFrame({
'index': [0, 1, 2],
'values': ["0123456", "01234567", "012345"]
})
ww.config.set_option('natural_language_threshold', 5)
dt = DataTable(dataframe, name='dt_name')
assert dt.columns['values'].logical_type == NaturalLanguage
ww.config.reset_option('natural_language_threshold')
def test_describe_dict(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_dict = dt.describe_dict()
index_order = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
stats_dict_to_df = pd.DataFrame(stats_dict).reindex(index_order)
stats_df = dt.describe()
pd.testing.assert_frame_equal(stats_df, stats_dict_to_df)
def test_describe_does_not_include_index(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_df = dt.describe()
assert 'index_col' not in stats_df.columns
def test_datatable_describe_method(describe_df):
categorical_ltypes = [Categorical,
CountryCode,
Ordinal(order=('yellow', 'red', 'blue')),
SubRegionCode,
ZIPCode]
boolean_ltypes = [Boolean]
datetime_ltypes = [Datetime]
formatted_datetime_ltypes = [Datetime(datetime_format='%Y~%m~%d')]
timedelta_ltypes = [Timedelta]
numeric_ltypes = [Double, Integer]
natural_language_ltypes = [EmailAddress, Filepath, FullName, IPAddress,
PhoneNumber, URL]
latlong_ltypes = [LatLong]
expected_index = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
# Test categorical columns
category_data = describe_df[['category_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'object'
else:
expected_dtype = 'category'
for ltype in categorical_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'category', 'custom_tag'},
'count': 7,
'nunique': 3,
'nan_count': 1,
'mode': 'red'}, name='category_col')
dt = DataTable(category_data, logical_types={'category_col': ltype}, semantic_tags={'category_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'category_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['category_col'].dropna())
# Test boolean columns
boolean_data = describe_df[['boolean_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'bool'
else:
expected_dtype = 'boolean'
for ltype in boolean_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 8,
'nan_count': 0,
'mode': True,
'num_true': 5,
'num_false': 3}, name='boolean_col')
dt = DataTable(boolean_data, logical_types={'boolean_col': ltype}, semantic_tags={'boolean_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'boolean_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['boolean_col'].dropna())
# Test datetime columns
datetime_data = describe_df[['datetime_col']]
for ltype in datetime_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': pd.Timestamp('2020-01-19 09:25:42.857142784'),
'mode': pd.Timestamp('2020-02-01 00:00:00'),
'min': pd.Timestamp('2020-01-01 00:00:00'),
'max': pd.Timestamp('2020-02-02 18:00:00')}, name='datetime_col')
dt = DataTable(datetime_data, logical_types={'datetime_col': ltype}, semantic_tags={'datetime_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'datetime_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['datetime_col'].dropna())
# Test formatted datetime columns
formatted_datetime_data = describe_df[['formatted_datetime_col']]
for ltype in formatted_datetime_ltypes:
converted_to_datetime = pd.to_datetime(['2020-01-01',
'2020-02-01',
'2020-03-01',
'2020-02-02',
'2020-03-02',
pd.NaT,
'2020-02-01',
'2020-01-02'])
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': converted_to_datetime.mean(),
'mode': pd.to_datetime('2020-02-01'),
'min': converted_to_datetime.min(),
'max': converted_to_datetime.max()}, name='formatted_datetime_col')
dt = DataTable(formatted_datetime_data,
logical_types={'formatted_datetime_col': ltype},
semantic_tags={'formatted_datetime_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'formatted_datetime_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['formatted_datetime_col'].dropna())
# Test timedelta columns - Skip for Koalas
if not (ks and isinstance(describe_df, ks.DataFrame)):
timedelta_data = describe_df['timedelta_col']
for ltype in timedelta_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nan_count': 1,
'mode': pd.Timedelta('31days')}, name='col')
df = pd.DataFrame({'col': timedelta_data})
dt = DataTable(df, logical_types={'col': ltype}, semantic_tags={'col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['col'].dropna())
# Test numeric columns
numeric_data = describe_df[['numeric_col']]
for ltype in numeric_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'numeric', 'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': 20.857142857142858,
'mode': 10,
'std': 18.27957486220227,
'min': 1,
'first_quartile': 10,
'second_quartile': 17,
'third_quartile': 26,
'max': 56}, name='numeric_col')
dt = DataTable(numeric_data, logical_types={'numeric_col': ltype}, semantic_tags={'numeric_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'numeric_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['numeric_col'].dropna(), check_exact=False)
# Test natural language columns
natural_language_data = describe_df[['natural_language_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'object'
else:
expected_dtype = 'string'
for ltype in natural_language_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nan_count': 1,
'mode': 'Duplicate sentence.'}, name='natural_language_col')
dt = DataTable(natural_language_data,
logical_types={'natural_language_col': ltype},
semantic_tags={'natural_language_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'natural_language_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['natural_language_col'].dropna())
# Test latlong columns
latlong_data = describe_df[['latlong_col']]
expected_dtype = 'object'
for ltype in latlong_ltypes:
mode = [0, 0] if ks and isinstance(describe_df, ks.DataFrame) else (0, 0)
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 6,
'nan_count': 2,
'mode': mode}, name='latlong_col')
dt = DataTable(latlong_data,
logical_types={'latlong_col': ltype},
semantic_tags={'latlong_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'latlong_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['latlong_col'].dropna())
def test_datatable_describe_with_improper_tags(describe_df):
df = describe_df.copy()[['boolean_col', 'natural_language_col']]
logical_types = {
'boolean_col': Boolean,
'natural_language_col': NaturalLanguage,
}
semantic_tags = {
'boolean_col': 'category',
'natural_language_col': 'numeric',
}
dt = DataTable(df, logical_types=logical_types, semantic_tags=semantic_tags)
stats_df = dt.describe()
# Make sure boolean stats were computed with improper 'category' tag
assert stats_df['boolean_col']['logical_type'] == Boolean
assert stats_df['boolean_col']['semantic_tags'] == {'category'}
# Make sure numeric stats were not computed with improper 'numeric' tag
assert stats_df['natural_language_col']['semantic_tags'] == {'numeric'}
assert stats_df['natural_language_col'][['mean', 'std', 'min', 'max']].isnull().all()
def test_datatable_describe_with_no_semantic_tags(describe_df):
df = describe_df.copy()[['category_col', 'numeric_col']]
logical_types = {
'category_col': Categorical,
'numeric_col': Integer,
}
dt = DataTable(df, logical_types=logical_types, use_standard_tags=False)
stats_df = dt.describe()
assert dt['category_col'].semantic_tags == set()
assert dt['numeric_col'].semantic_tags == set()
# Make sure category stats were computed
assert stats_df['category_col']['semantic_tags'] == set()
assert stats_df['category_col']['nunique'] == 3
# Make sure numeric stats were computed
assert stats_df['numeric_col']['semantic_tags'] == set()
np.testing.assert_almost_equal(stats_df['numeric_col']['mean'], 20.85714, 5)
def test_datatable_describe_with_include(sample_df):
semantic_tags = {
'full_name': 'tag1',
'email': ['tag2'],
'age': ['numeric', 'age']
}
dt = DataTable(sample_df, semantic_tags=semantic_tags)
col_name_df = dt.describe(include=['full_name'])
assert col_name_df.shape == (16, 1)
assert 'full_name', 'email' in col_name_df.columns
semantic_tags_df = dt.describe(['tag1', 'tag2'])
assert 'full_name' in col_name_df.columns
assert len(semantic_tags_df.columns) == 2
logical_types_df = dt.describe([Datetime, Boolean])
assert 'signup_date', 'is_registered' in logical_types_df.columns
assert len(logical_types_df.columns) == 2
multi_params_df = dt.describe(['age', 'tag1', Datetime])
expected = ['full_name', 'age', 'signup_date']
for col_name in expected:
assert col_name in multi_params_df.columns
multi_params_df['full_name'].equals(col_name_df['full_name'])
multi_params_df['full_name'].equals(dt.describe()['full_name'])
def test_value_counts(categorical_df):
logical_types = {
'ints': Integer,
'categories1': Categorical,
'bools': Boolean,
'categories2': Categorical,
'categories3': Categorical,
}
dt = DataTable(categorical_df, logical_types=logical_types)
val_cts = dt.value_counts()
for col in dt.columns:
if col in ['ints', 'bools']:
assert col not in val_cts
else:
assert col in val_cts
none_val = np.nan
expected_cat1 = [{'value': 200, 'count': 4}, {'value': 100, 'count': 3}, {'value': 1, 'count': 2}, {'value': 3, 'count': 1}]
# Koalas converts numeric categories to strings, so we need to update the expected values for this
# Koalas will result in `None` instead of `np.nan` in categorical columns
if ks and isinstance(categorical_df, ks.DataFrame):
updated_results = []
for items in expected_cat1:
updated_results.append({k: (str(v) if k == 'value' else v) for k, v in items.items()})
expected_cat1 = updated_results
none_val = 'None'
assert val_cts['categories1'] == expected_cat1
assert val_cts['categories2'] == [{'value': none_val, 'count': 6}, {'value': 'test', 'count': 3}, {'value': 'test2', 'count': 1}]
assert val_cts['categories3'] == [{'value': none_val, 'count': 7}, {'value': 'test', 'count': 3}]
val_cts_descending = dt.value_counts(ascending=True)
for col, vals in val_cts_descending.items():
for i in range(len(vals)):
assert vals[i]['count'] == val_cts[col][-i - 1]['count']
val_cts_dropna = dt.value_counts(dropna=True)
assert val_cts_dropna['categories3'] == [{'value': 'test', 'count': 3}]
val_cts_2 = dt.value_counts(top_n=2)
for col in val_cts_2:
assert len(val_cts_2[col]) == 2
def test_datatable_replace_nans_for_mutual_info():
df_nans = pd.DataFrame({
'ints': pd.Series([2, pd.NA, 5, 2], dtype='Int64'),
'floats': pd.Series([3.3, None, 2.3, 1.3]),
'bools': pd.Series([True, None, True, False]),
'int_to_cat_nan': pd.Series([1, np.nan, 3, 1], dtype='category'),
'str': pd.Series(['test', np.nan, 'test2', 'test']),
'str_no_nan': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', None, '2020-01-02', '2020-01-03'])
})
dt_nans = DataTable(df_nans)
formatted_df = dt_nans._replace_nans_for_mutual_info(dt_nans.to_dataframe().copy())
assert isinstance(formatted_df, pd.DataFrame)
assert formatted_df['ints'].equals(pd.Series([2, 3, 5, 2], dtype='Int64'))
assert formatted_df['floats'].equals(pd.Series([3.3, 2.3, 2.3, 1.3], dtype='float'))
assert formatted_df['bools'].equals(pd.Series([True, True, True, False], dtype='category'))
assert formatted_df['int_to_cat_nan'].equals(pd.Series([1, 1, 3, 1], dtype='category'))
assert formatted_df['str'].equals(pd.Series(['test', 'test', 'test2', 'test'], dtype='category'))
assert formatted_df['str_no_nan'].equals(pd.Series(['test', 'test2', 'test2', 'test'], dtype='category'))
assert formatted_df['dates'].equals(pd.Series(['2020-01-01', '2020-01-02', '2020-01-02', '2020-01-03'], dtype='datetime64[ns]'))
def test_datatable_make_categorical_for_mutual_info():
df = pd.DataFrame({
'ints1': pd.Series([1, 2, 3, 2]),
'ints2': pd.Series([1, 100, 1, 100]),
'bools': pd.Series([True, False, True, False]),
'categories': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', '2019-01-02', '2020-08-03', '1997-01-04'])
})
dt = DataTable(df)
formatted_num_bins_df = dt._make_categorical_for_mutual_info(dt.to_dataframe().copy(), num_bins=4)
assert isinstance(formatted_num_bins_df, pd.DataFrame)
assert formatted_num_bins_df['ints1'].equals( | pd.Series([0, 1, 3, 1], dtype='int8') | pandas.Series |
"""
Module for shapefile resampling methods.
This code was originailly developed by <NAME>.
(https://github.com/basaks)
See `uncoverml.scripts.shiftmap_cli` for a resampling CLI.
"""
import tempfile
import os
from os.path import abspath, exists, splitext
from os import remove
import logging
import geopandas as gpd
import pandas as pd
import pandas.core.algorithms as algos
import numpy as np
import sklearn
from shapely.geometry import Polygon
from fiona.errors import DriverError
import uncoverml as ls
import uncoverml.mllog
import uncoverml.targets
BIN = 'bin'
GEOMETRY = 'geometry'
_logger = logging.getLogger(__name__)
def bootstrap_data_indicies(population, samples=None, random_state=1):
samples = population if samples is None else samples
return np.random.RandomState(random_state).randint(0, population, samples)
def prepapre_dataframe(data, fields_to_keep):
if isinstance(data, gpd.GeoDataFrame):
gdf = data
elif isinstance(data, ls.targets.Targets):
gdf = data.to_geodataframe()
# Try to treat as shapefile.
else:
try:
gdf = gpd.read_file(data)
except DriverError:
_logger.error(
"Couldn't read data for resampling. Ensure a valid "
"shapefile path or Targets object is being provided "
"as input.")
raise
return filter_fields(fields_to_keep, gdf)
def filter_fields(fields_to_keep, gdf):
fields_to_keep = [GEOMETRY] + list(fields_to_keep) # add geometry
original_fields = gdf.columns
for f in fields_to_keep:
if f not in original_fields:
raise RuntimeError("field '{}' must exist in shapefile".format(f))
gdf_out = gdf[fields_to_keep]
return gdf_out
def resample_by_magnitude(input_data, target_field, bins=10, interval='percentile',
fields_to_keep=[], bootstrap=True, output_samples=None,
validation=False, validation_points=100):
"""
Parameters
----------
input_gdf : geopandas.GeoDataFrame
Geopandas dataframe containing targets to be resampled.
target_field : str
target field name based on which resampling is performed. Field
must exist in the input_shapefile
bins : int
number of bins for sampling
fields_to_keep : list
of strings to store in the output shapefile
bootstrap : bool, optional
whether to sample with replacement or not
output_samples : int, optional
number of samples in the output shpfile. If not provided, the
output samples will be assumed to be the same as the original
shapefile
validation : bool, optional
validation file name
validation_points : int, optional
approximate number of points in the validation shapefile
Returns
-------
"""
if bootstrap and validation:
raise ValueError('bootstrapping should not be use while'
'creating a validation shapefile.')
if interval not in ['percentile', 'linear']:
_logger.warning("Interval method '{}' not recognised, defaulting to 'percentile'"
.format(interval))
interval = 'percentile'
if len(fields_to_keep):
fields_to_keep.append(target_field)
else:
fields_to_keep = [target_field]
gdf_out = prepapre_dataframe(input_data, fields_to_keep)
# the idea is stolen from pandas.qcut
# pd.qcut does not work for cases when it result in non-unique bin edges
target = gdf_out[target_field].values
if interval == 'percentile':
bin_edges = algos.quantile(
np.unique(target), np.linspace(0, 1, bins+1))
elif interval == 'linear':
bin_edges = np.linspace(np.min(target), np.max(target), bins + 1)
result = pd.core.reshape.tile._bins_to_cuts(target, bin_edges,
labels=False,
include_lowest=True)
# add to output df for sampling
gdf_out[BIN] = result[0]
dfs_to_concat = []
validation_dfs_to_concat = []
total_samples = output_samples if output_samples else gdf_out.shape[0]
samples_per_bin = total_samples // bins
validate_array = np.ones(bins, dtype=np.bool)
if validation and bins > validation_points:
validate_array[validation_points:] = False
np.random.shuffle(validate_array)
gb = gdf_out.groupby(BIN)
for i, (b, gr) in enumerate(gb):
if bootstrap:
dfs_to_concat.append(gr.sample(n=samples_per_bin,
replace=bootstrap))
else:
_df, v_df = _sample_without_replacement(gr, samples_per_bin,
validate_array[i])
dfs_to_concat.append(_df)
validation_dfs_to_concat.append(v_df)
final_df = | pd.concat(dfs_to_concat) | pandas.concat |
# column addition
import pandas as pd
import numpy as np
d={'one':pd.Series([1,2,3],index=['a','b','c']),
'two': | pd.Series([1,2,3,4],index=['a','b','c','d']) | pandas.Series |
# Type: module
# String form: <module 'WindPy' from '/opt/conda/lib/python3.6/WindPy.py'>
# File: /opt/conda/lib/python3.6/WindPy.py
# Source:
from ctypes import *
import threading
import traceback
from datetime import datetime, date, time, timedelta
import time as t
import re
from WindData import *
from WindBktData import *
from XMLParser import XMLReader
import pandas as pd
import logging
import getpass
r = XMLReader("/wind/serverapi/wsq_decode.xml")
# import speedtcpclient as client
expolib = None
speedlib = None
TDB_lib = None
c_lib = None
# For test use! Should be replaced with a real userID
# userID = "1214779"
api_retry = 1
interval = 2
userName = getpass.getuser()
authDataPath = "/home/" + userName + "/.wind/authData"
authString = readFile(authDataPath)
# userID = str(getJsonTag(authString, 'accountID'))
# if userID == '':
# userID = "1214779"
wind_log_path = "/usr/local/log/"
def DemoWSQCallback(out):
print("DemoWSQCallback")
print(out)
wsq_items = []
def g_wsq_callback(reqID, indata):
out = WindData()
out.set(indata, 3)
out.RequestID = reqID
id2rtField = {}
for item in wsq_items:
id2rtField[item['id']] = item['funname'].upper()
tmp = [id2rtField[str(val)] for val in out.Fields]
out.Fields = tmp
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
try:
g_wsq_callback.callback_funcs[reqID](out)
except:
print(out)
SPDCBTYPE = CFUNCTYPE(None, c_int, POINTER(c_apiout))
spdcb = SPDCBTYPE(g_wsq_callback)
g_wsq_callback.callback_funcs = {}
REQUEST_ID_CANCELALL = 0
REQUEST_ID_SYNC = 1
REQUEST_ID_MAX_RESQUEST = 9999
REQUEST_ID_MIN_RESQUEST = 3
g_requestID = REQUEST_ID_MIN_RESQUEST # The minimum id of NONE BLOCKING MODE
def retry(func):
def wrapper(*args, **kargs):
out = func(*args, **kargs)
if not out:
return out
error_code = type_check(out)
if error_code == -10:
for i in range(api_retry):
out = func(*args, **kargs)
error_code = type_check(out)
if error_code != -10:
break
return out
# 判断out类型,若带usedf参数则为tuple
def type_check(out):
if isinstance(out, tuple):
error_code = out[0]
else:
error_code = out.ErrorCode
return error_code
return wrapper
class WindQnt:
b_start = False
def __static_var(var_name, inital_value):
def _set_var(obj):
setattr(obj, var_name, inital_value)
return obj
return _set_var
def __stringify(arg):
if arg is None:
tmp = [""]
elif arg == "":
tmp = [""]
elif isinstance(arg, str):
a_l = arg.strip().split(',')
arg = ','.join([a.strip() for a in a_l])
tmp = [arg]
elif isinstance(arg, list):
tmp = [str(x) for x in arg]
elif isinstance(arg, tuple):
tmp = [str(x) for x in arg]
elif isinstance(arg, float) or isinstance(arg, int):
tmp = [str(arg)]
elif str(type(arg)) == "<type 'unicode'>":
tmp = [arg]
else:
tmp = None
if tmp is None:
return None
else:
return ";".join(tmp)
def __parseoptions(self, arga=None, argb=None):
options = WindQnt._WindQnt__stringify(self)
if options is None:
return None
if isinstance(arga, tuple):
for i in range(len(arga)):
v = WindQnt._WindQnt__stringify(arga[i])
if v is None:
continue
else:
if options == "":
options = v
else:
options = options + ";" + v
if isinstance(argb, dict):
keys = argb.keys()
for key in keys:
v = WindQnt._WindQnt__stringify(argb[key])
if v is None:
continue
else:
if options == "":
options = str(key) + "=" + v
else:
options = options + ";" + str(key) + "=" + v
return options
@staticmethod
def format_option(options):
if options is None:
return None
option_f = options.replace(';', '&&')
return option_f
# with_time param means you can format hours:minutes:seconds, but not must be
def __parsedate(self, with_time=False):
d = self
if d is None:
d = datetime.today().strftime("%Y-%m-%d")
return d
elif isinstance(d, date):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, datetime):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, str):
try:
d = pure_num = ''.join(list(filter(str.isdigit, d)))
if len(d) != 8 and len(d) != 14:
return None
if len(pure_num) == 14:
d = pure_num[:8] + ' ' + pure_num[8:]
if int(d[9:11]) > 24 or int(d[9:11]) < 0 or \
int(d[11:13]) > 60 or int(d[11:13]) < 0 or \
int(d[13:15]) > 60 or int(d[13:15]) < 0:
return None
if int(d[:4]) < 1000 or int(d[:4]) > 9999 or \
int(d[4:6]) < 1 or int(d[4:6]) > 12 or \
int(d[6:8]) < 1 or int(d[6:8]) > 31:
return None
date_time = d.split(' ')
YMD = date_time[0][:4] + '-' + date_time[0][4:6] + '-' + date_time[0][6:8]
HMS = ''
if with_time and len(date_time) == 2:
HMS = ' ' + date_time[1][:2] + ':' + date_time[1][2:4] + ':' + date_time[1][4:6]
d = YMD + HMS
return d
except:
return None
return d
# def __parsedate(d):
# if d is None:
# d = datetime.today().strftime("%Y-%m-%d")
# return d
# elif isinstance(d, date):
# d = d.strftime("%Y-%m-%d")
# return d
# elif isinstance(d, str):
# try:
# #Try to get datetime object from the user input string.
# #We will go to the except block, given an invalid format.
# if re.match(r'^(?:(?!0000)[0-9]{4}-(?:(?:0[1-9]|1[0-2])-(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])-(?:29|30)|(?:0[13578]|1[02])-31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)-02-29)$',d, re.I|re.M):
# d = datetime.strptime(d, "%Y-%m-%d")
# return d.strftime("%Y-%m-%d")
# elif re.match(r'^(?:(?!0000)[0-9]{4}(?:(?:0[1-9]|1[0-2])(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])(?:29|30)|(?:0[13578]|1[02])31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)0229)$', d, re.I|re.M):
# d = datetime.strptime(d, "%Y%m%d")
# return d.strftime("%Y-%m-%d")
# else:
# return None
# except:
# return None
# else:
# return None
#
# return d
def use_debug_file(self, debug_expo='/wind/serverapi/libExpoWrapperDebug.so',
debug_speed='/wind/serverapi/libSpeedWrapperDebug.so'):
WindQnt.debug_expo = debug_expo
WindQnt.debug_speed = debug_speed
@staticmethod
def format_wind_data(error_codes, msg):
out = WindData()
out.ErrorCode = error_codes
out.Codes = ['ErrorReport']
out.Fields = ['OUT MESSAGE']
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
out.Data = [[msg]]
return out
@staticmethod
def to_dataframe(out):
if out.ErrorCode != 0:
return pd.DataFrame([out.ErrorCode], columns=['ErrorCode'])
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
return df.T.infer_objects()
def isconnected(self):
return 0
class __start:
def __init__(self):
self.restype = c_int32
self.argtypes = [c_wchar_p, c_wchar_p, c_int32]
self.lastCall = 0
def __call__(self, show_welcome=True, retry=1):
global expolib
global speedlib
global TDB_lib
global c_lib
global api_retry
if t.time() - self.lastCall > interval:
if WindQnt.b_start:
return
WindQnt.b_start = True
self.lastCall = t.time()
TDB_lib = CDLL("/wind/serverapi/libtdb.so")
c_lib = CDLL("/wind/serverapi/libtradeapi.so")
c_lib.tLogon.restype = POINTER(c_variant)
c_lib.tQuery.restype = POINTER(c_variant)
c_lib.tLogout.restype = POINTER(c_variant)
c_lib.tSendOrder.restype = POINTER(c_variant)
c_lib.tCancelOrder.restype = POINTER(c_variant)
if hasattr(WindQnt, "debug_expo"):
expolib = CDLL(WindQnt.debug_expo)
else:
expolib = CDLL("/wind/serverapi/libExpoWrapper.so")
expolib.SendMsg2Expo.restype = POINTER(c_apiout)
if hasattr(WindQnt, "debug_speed"):
speedlib = CDLL(WindQnt.debug_speed)
else:
speedlib = CDLL("/wind/serverapi/libSpeedWrapper.so")
speedlib.SendMsg2SpeedAsyc.restype = POINTER(c_apiout)
api_retry = int(retry) if int(retry) < 6 else 5
if show_welcome:
print("COPYRIGHT (C) 2017 Wind Information Co., Ltd. ALL RIGHTS RESERVED.\n"
"IN NO CIRCUMSTANCE SHALL WIND BE RESPONSIBLE FOR ANY DAMAGES OR LOSSES\n"
"CAUSED BY USING WIND QUANT API FOR PYTHON.")
return
else:
# print ("wait a while to start!")
return ERR_WAIT
def __str__(self):
return ("Start the Wind Quant API")
start = __start()
class __wses:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p]
self.lastCall = 0
@retry
def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, *arga, **argb):
# write_log('call wsd')
s = int(t.time()*1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
if isinstance(endTime, str):
# 判断是否为日期宏,若不是,则调用parsedate方法
endTime_compile = re.findall('\d\d\d\d\d\d\d\d', endTime.replace('-', ''))
if endTime_compile:
endTime = WindQnt._WindQnt__parsedate(endTime)
else:
# 处理datetime类型日期
endTime = WindQnt._WindQnt__parsedate(endTime)
if endTime == None:
print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
return
if isinstance(beginTime, str):
beginTime_compile = re.findall('\d\d\d\d\d\d\d\d', beginTime.replace('-', ''))
if beginTime_compile:
beginTime = WindQnt._WindQnt__parsedate(beginTime)
else:
beginTime = WindQnt._WindQnt__parsedate(beginTime)
if beginTime == None:
print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
return
if(endTime==None): endTime = datetime.today().strftime("%Y-%m-%d")
if(beginTime==None): beginTime = endTime
# chech if the endTime is before than the beginTime
# endD = datetime.strptime(endTime, "%Y-%m-%d")
# beginD = datetime.strptime(beginTime, "%Y-%m-%d")
# if (endD-beginD).days < 0:
# print("The endTime should be later than or equal to the beginTime!")
# return
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if codes == None or fields == None or options == None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wses|"+codes+"|"+fields+"|"+beginTime+"|"+endTime+"|"+options+"|"+userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time()*1000)
write_log(str(e-s) + ' call wses')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate = True)
if 'usedf' in argb.keys():
usedf = argb['usedf']
if usedf:
if not isinstance(usedf, bool):
print('the sixth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsd')
return out.ErrorCode, df.T.infer_objects()
except Exception:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time()*1000)
write_log(str(e-s) + ' call wses')
return out
def __str__(self):
return ("WSES")
wses = __wses()
class __wsee:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p] #codes,fields,options
self.lastCall = 0
@retry
def __call__(self, codes, fields, options=None, *arga, **argb):
# write_log('call wsee')
s = int(t.time()*1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if fields == None or options == None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wsee|"+codes+"|"+fields+"|"+options+"|"+userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsee')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=True)
#将winddata类型数据改为dataframe格式
if 'usedf' in argb.keys():
usedf = argb['usedf']
if usedf:
if not isinstance(usedf, bool):
print('the fourth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
if out.Codes == 1 or out.Fields == 1:
return out.ErrorCode, WindQnt.to_dataframe(out)
else:
df = pd.DataFrame(out.Data, columns=out.Codes, index=out.Fields)
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsee')
return out.ErrorCode, df.T.infer_objects()
except Exception as e:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsee')
return out
def __str__(self):
return ("wsee")
wsee = __wsee()
class __wsi:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p]
self.lastCall = 0
@retry
def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb):
# write_log('call wsi')
s = int(t.time() * 1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
# endTime = WindQnt._WindQnt__parsedate(endTime)
# if endTime is None:
# print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
# return
#
# beginTime = WindQnt._WindQnt__parsedate(beginTime)
# if beginTime is None:
# print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
# return
if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d")
if (beginTime is None): beginTime = endTime
# chech if the endTime is before than the beginTime
# endD = datetime.strptime(endTime, "%Y-%m-%d")
# beginD = datetime.strptime(beginTime, "%Y-%m-%d")
# if (endD-beginD).days < 0:
# print("The endTime should be later than or equal to the beginTime!")
# return
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if codes is None or fields is None or options is None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wsi|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsi')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=False)
if usedf:
if not isinstance(usedf, bool):
print('the sixth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsi')
return out.ErrorCode, df.T.infer_objects()
except Exception:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsi')
return out
def __str__(self):
return ("WSI")
wsi = __wsi()
class __wsd:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p]
self.lastCall = 0
@retry
def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb):
# write_log('call wsd')
s = int(t.time() * 1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
# endTime = WindQnt._WindQnt__parsedate(endTime)
# if endTime is None:
# print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
# return
#
# beginTime = WindQnt._WindQnt__parsedate(beginTime)
# if beginTime is None:
# print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
# return
if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d")
if (beginTime is None): beginTime = endTime
# chech if the endTime is before than the beginTime
# endD = datetime.strptime(endTime, "%Y-%m-%d")
# beginD = datetime.strptime(beginTime, "%Y-%m-%d")
# if (endD-beginD).days < 0:
# print("The endTime should be later than or equal to the beginTime!")
# return
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if codes is None or fields is None or options is None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wsd|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsd')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=True)
if usedf:
if not isinstance(usedf, bool):
print('the sixth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsd')
return out.ErrorCode, df.T.infer_objects()
except Exception:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wsd')
return out
def __str__(self):
return ("WSD")
wsd = __wsd()
class __wst:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p]
self.lastCall = 0
@retry
def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb):
# write_log('call wst')
s = int(t.time() * 1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d")
if (beginTime is None): beginTime = endTime
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if codes is None or fields is None or options is None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wst|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wst')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=False)
if usedf:
if not isinstance(usedf, bool):
print('the sixth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wst')
return out.ErrorCode, df.T.infer_objects()
except Exception:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wst')
return out
def __str__(self):
return ("WST")
wst = __wst()
class __wss:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p] # codes,fields,options
self.lastCall = 0
@retry
def __call__(self, codes, fields, options=None, usedf=None, *arga, **argb):
# write_log('call wss')
s = int(t.time() * 1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if fields is None or options is None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wss|" + codes + "|" + fields + "|" + options + "|" + userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time() * 1000)
write_log(str(e - s) + ' call wss')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=True)
# 将winddata类型数据改为dataframe格式
if usedf:
if not isinstance(usedf, bool):
print('the fourth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
if out.Codes == 1 or out.Fields == 1:
return out.ErrorCode, WindQnt.to_dataframe(out)
else:
df = | pd.DataFrame(out.Data, columns=out.Codes, index=out.Fields) | pandas.DataFrame |
import os
import platform
import subprocess
import logging
import numpy as np
import xarray as xr
import pandas as pd
import param
from .operation_parameters import OperationParameters
from .iteration_parameters import IterationParameters
from .constituent_properties import ConstituentProperties
from .model_constants import ModelConstants
from .material_properties import MaterialProperties
from .output_control import OutputControl
from .time_control import TimeControl
from .time_series import TimeSeries
from .io import number_of_constituents
from .io import read_bc_file
from .io import write_bc_file
from . import parse_hot_file, parse_dat_file, parse_dat_files, append_array_to_dataset
from genesis.mesh import Simulation
log = logging.getLogger('adhmodel.simulation')
class BoundaryConditions(param.Parameterized):
boundary_strings = param.DataFrame(
default= | pd.DataFrame(data=[], columns=["CARD", "ID", "ID_0", "ID_1"]) | pandas.DataFrame |
import os
import zipfile
from pathlib import Path
import warnings
from shutil import rmtree
import time
import pandas as pd
import numpy as np
import SimpleITK as sitk
from tqdm import tqdm
from segmentation_metrics import compute_segmentation_scores
from survival_metrics import concordance_index
class AIcrowdEvaluator:
def __init__(
self,
ground_truth_segmentation_folder="data/ground_truth/segmentation/",
ground_truth_survival_file="data/ground_truth/survival/hecktor2021_patient_endpoint_testing.csv",
bounding_boxes_file="data/hecktor2021_bbox_testing.csv",
extraction_folder="data/extraction/",
round_number=1,
):
"""Evaluator for the Hecktor Challenge
Args:
ground_truth_folder (str): the path to the folder
containing the ground truth segmentation.
ground_truth_survival_file (str): the path to the file
containing the ground truth survival time.
bounding_boxes_file (str): the path to the csv file which defines
the bounding boxes for each patient.
extraction_folder (str, optional): the path to the folder where the
extraction of the .zip submission
will take place. Defaults to "data/tmp/".
This folder has to be created beforehand.
round_number (int, optional): the round number. Defaults to 1.
"""
self.groud_truth_folder = Path(ground_truth_segmentation_folder)
self.round = round_number
self.extraction_folder = Path(extraction_folder)
self.bounding_boxes_file = Path(bounding_boxes_file)
self.gt_df = pd.read_csv(ground_truth_survival_file).set_index(
"PatientID")
def _evaluate_segmentation(self, client_payload, _context={}):
submission_file_path = client_payload["submission_file_path"]
aicrowd_submission_id = client_payload["aicrowd_submission_id"]
aicrowd_participant_uid = client_payload["aicrowd_participant_id"]
submission_extraction_folder = self.extraction_folder / (
'submission' + str(aicrowd_submission_id) + '/')
submission_extraction_folder.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(str(Path(submission_file_path).resolve()),
"r") as zip_ref:
zip_ref.extractall(str(submission_extraction_folder.resolve()))
groundtruth_paths = [
f for f in self.groud_truth_folder.rglob("*.nii.gz")
]
bb_df = pd.read_csv(str(
self.bounding_boxes_file.resolve())).set_index("PatientID")
results_df = pd.DataFrame()
missing_patients = list()
unresampled_patients = list()
resampler = sitk.ResampleImageFilter()
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
for path in tqdm(groundtruth_paths):
patient_id = path.name[:7]
prediction_files = [
f
for f in self.extraction_folder.rglob(patient_id + "*.nii.gz")
]
if len(prediction_files) > 1:
raise Exception(
"There is too many prediction files for patient {}".format(
patient_id))
elif len(prediction_files) == 0:
results_df = results_df.append(
{
"dice_score": 0,
"hausdorff_distance_95": np.inf,
"recall": 0,
"precision": 0,
},
ignore_index=True)
missing_patients.append(patient_id)
continue
bb = np.array([
bb_df.loc[patient_id, "x1"], bb_df.loc[patient_id, "y1"],
bb_df.loc[patient_id, "z1"], bb_df.loc[patient_id, "x2"],
bb_df.loc[patient_id, "y2"], bb_df.loc[patient_id, "z2"]
])
image_gt = sitk.ReadImage(str(path.resolve()))
image_pred = sitk.ReadImage(str(prediction_files[0].resolve()))
resampler.SetReferenceImage(image_gt)
resampler.SetOutputOrigin(bb[:3])
voxel_spacing = np.array(image_gt.GetSpacing())
output_size = np.round(
(bb[3:] - bb[:3]) / voxel_spacing).astype(int)
resampler.SetSize([int(k) for k in output_size])
# Crop to the bonding box and/or resample to the original spacing
spacing = image_gt.GetSpacing()
if spacing != image_pred.GetSpacing():
unresampled_patients.append(patient_id)
image_gt = resampler.Execute(image_gt)
image_pred = resampler.Execute(image_pred)
results_df = results_df.append(
compute_segmentation_scores(
sitk.GetArrayFromImage(image_gt),
sitk.GetArrayFromImage(image_pred),
spacing,
),
ignore_index=True,
)
_result_object = {
"dice_score": results_df["dice_score"].mean(),
"hausdorff_distance_95":
results_df["hausdorff_distance_95"].median(),
"recall": results_df["recall"].mean(),
"precision": results_df["precision"].mean(),
}
rmtree(str(submission_extraction_folder.resolve()))
messages = list()
if len(unresampled_patients) > 0:
messages.append(
f"The following patient(s) was/were not resampled back"
f" to the original resolution: {unresampled_patients}."
f"\nWe applied a nearest neighbor resampling.\n")
if len(missing_patients) > 0:
messages.append(
f"The following patient(s) was/were missing: {missing_patients}."
f"\nA score of 0 and infinity were attributed to them "
f"for the dice score and Hausdorff distance respectively.")
_result_object["message"] = "".join(messages)
return _result_object
def _evaluate_survival(self, client_payload, _context={}):
submission_file_path = client_payload["submission_file_path"]
predictions_df = pd.read_csv(submission_file_path).set_index(
"PatientID")
if "Prediction" not in predictions_df.columns:
raise RuntimeError("The 'Prediction' column is missing.")
extra_patients = [
p for p in predictions_df.index if p not in self.gt_df.index
]
# Discard extra patient
if len(extra_patients) > 0:
predictions_df = predictions_df.drop(labels=extra_patients, axis=0)
# Check for redundant entries
if len(predictions_df.index) > len(list(set(predictions_df.index))):
raise RuntimeError("One or more patients appear twice in the csv")
# The following function concatenate the submission csv and the
# ground truth and fill missing entries with NaNs. The missing
# entries are then counted as non-concordant by the concordance_index
# function
df = pd.concat((self.gt_df, predictions_df), axis=1)
missing_patients = list(df.loc[ | pd.isna(df['Prediction']) | pandas.isna |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 12:55:16 2017
@author: rdk10
"""
import os
import pandas as pd
import sitchensis.Functions as f
import tkinter as tk
from tkinter.filedialog import askopenfilename
import pdb
############# Functions below #############################################
def getFileName():
cwd = os.getcwd()
root = tk.Tk()
root.lift()
root.attributes('-topmost',True)
filename = askopenfilename(initialdir = cwd ,title = "Select a tree file and directory", filetypes = [("Excel","*.xlsx"),("Excel","*.xlsm")]) #Ask user to pick files
root.after_idle(root.attributes,'-topmost',False)
root.withdraw()
fileName = filename.rsplit('/')[-1] #Excludes path to file
filePath = os.path.dirname(filename)
return(filePath, fileName)
def importExcelTree(fullFileName):
"""This section assumed one excel file per tree with a tabe for each type of measurements (trunk, segment, or branch)"""
#Import data all at once
treeData = pd.read_excel(fullFileName, sheet_name = None) #,converters={'name':str,'ref':str, 'referenceType':str})
####IMPORTANT if version of pandas is <21 then sheet_name is not recognized and needs to be sheetname. better to update pandas
#list of dictionary keys
keys = [key for key in treeData]
#This tests for types of data present and assigns keys
if any(['trunk' in t.lower() for t in treeData]):
trunkKey = keys[[i for i, key in enumerate(keys) if 'trunk' in key.lower()][0]]
if len(treeData[trunkKey])>0:
trunkBool = True
else:trunkBool = False
else:trunkBool = False
if any(['seg' in t.lower() for t in treeData]):
segKey = keys[[i for i, key in enumerate(keys) if 'seg' in key.lower()][0]]
if len(treeData[segKey])>0:
segBool = True
else:segBool = False
else:segBool = False
if any(['branch' in t.lower() for t in treeData]):
brKey = keys[[i for i, key in enumerate(keys) if 'branch' in key.lower()][0]]
if len(treeData[brKey])>0:
branchBool = True
else:branchBool = False
else:branchBool = False
#Assign declination to variable
if any(['declin' in t.lower() for t in treeData]):
if len([i for i, key in enumerate(keys) if 'declin' in key.lower()])>0:
declinKey = keys[[i for i, key in enumerate(keys) if 'declin' in key.lower()][0]]
declinRefs = pd.read_excel(fullFileName, sheet_name = declinKey ,converters={'name':str})
declinRefs.columns = [x.lower() for x in declinRefs.columns]
declination = declinRefs['declination'].iloc[0] #extract number
declination = declination.item() # convert to python float from numpy.float64
else:
declination = 0.00
#Assign cust refs to dataFrame
if len([i for i, key in enumerate(keys) if 'cust' in key.lower()])>0:
custKey = keys[[i for i, key in enumerate(keys) if 'cust' in key.lower()][0]]
custRefs = pd.read_excel(fullFileName, sheet_name = custKey ,converters={'name':str})
custRefs.columns = [x.lower() for x in custRefs.columns]
custRefs['azi'] = custRefs['azi'] + declination
custRefs = f.calcCustRefs(custRefs)
else:
custRefs = []
#Saves the data if it exists and makes changes to columns so they work in the program
if trunkBool:
trunkDat = | pd.read_excel(fullFileName, sheet_name = trunkKey, converters={'name':str,'ref':str}) | pandas.read_excel |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# shape.py
# definitions of shape characters
import math
import random
import numpy as np
import pandas as pd
from shapely.geometry import Point
from tqdm.auto import tqdm # progress bar
__all__ = [
"FormFactor",
"FractalDimension",
"VolumeFacadeRatio",
"CircularCompactness",
"SquareCompactness",
"Convexity",
"CourtyardIndex",
"Rectangularity",
"ShapeIndex",
"Corners",
"Squareness",
"EquivalentRectangularIndex",
"Elongation",
"CentroidCorners",
"Linearity",
"CompactnessWeightedAxis",
]
class FormFactor:
"""
Calculates form factor of each object in given GeoDataFrame.
.. math::
area \\over {volume^{2 \\over 3}}
Adapted from :cite:`bourdic2012`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
volumes : str, list, np.array, pd.Series
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored volume value.
(To calculate volume you can use :py:func:`momepy.volume`)
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
volumes : Series
Series containing used volume values
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['formfactor'] = momepy.FormFactor(buildings_df, 'volume').series
>>> buildings_df.formfactor[0]
1.9385988170288635
>>> volume = momepy.Volume(buildings_df, 'height').series
>>> buildings_df['formfactor'] = momepy.FormFactor(buildings_df, volume).series
>>> buildings_df.formfactor[0]
1.9385988170288635
"""
def __init__(self, gdf, volumes, areas=None):
self.gdf = gdf
gdf = gdf.copy()
if not isinstance(volumes, str):
gdf["mm_v"] = volumes
volumes = "mm_v"
self.volumes = gdf[volumes]
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
zeros = gdf[volumes] == 0
res = np.empty(len(gdf))
res[zeros] = 0
res[~zeros] = gdf[areas][~zeros] / (gdf[volumes][~zeros] ** (2 / 3))
self.series = pd.Series(res, index=gdf.index)
class FractalDimension:
"""
Calculates fractal dimension of each object in given GeoDataFrame.
.. math::
{2log({{perimeter} \\over {4}})} \\over log(area)
Based on :cite:`mcgarigal1995fragstats`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
perimeters : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored perimeter value. If set to ``None``, function will calculate perimeters
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
perimeters : Series
Series containing used perimeter values
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['fractal'] = momepy.FractalDimension(buildings_df,
... 'area',
... 'peri').series
>>> buildings_df.fractal[0]
1.0726778567038908
"""
def __init__(self, gdf, areas=None, perimeters=None):
self.gdf = gdf
gdf = gdf.copy()
if perimeters is None:
gdf["mm_p"] = gdf.geometry.length
perimeters = "mm_p"
else:
if not isinstance(perimeters, str):
gdf["mm_p"] = perimeters
perimeters = "mm_p"
self.perimeters = gdf[perimeters]
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.series = pd.Series(
(2 * np.log(gdf[perimeters] / 4)) / np.log(gdf[areas]), index=gdf.index
)
class VolumeFacadeRatio:
"""
Calculates volume/facade ratio of each object in given GeoDataFrame.
.. math::
volume \\over perimeter * height
Adapted from :cite:`schirmer2015`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
heights : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored height value
volumes : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored volume value
perimeters : , list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored perimeter value
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
perimeters : Series
Series containing used perimeter values
volumes : Series
Series containing used volume values
Examples
--------
>>> buildings_df['vfr'] = momepy.VolumeFacadeRatio(buildings_df, 'height').series
>>> buildings_df.vfr[0]
5.310715735236504
"""
def __init__(self, gdf, heights, volumes=None, perimeters=None):
self.gdf = gdf
gdf = gdf.copy()
if perimeters is None:
gdf["mm_p"] = gdf.geometry.length
perimeters = "mm_p"
else:
if not isinstance(perimeters, str):
gdf["mm_p"] = perimeters
perimeters = "mm_p"
self.perimeters = gdf[perimeters]
if volumes is None:
gdf["mm_v"] = gdf.geometry.area * gdf[heights]
volumes = "mm_v"
else:
if not isinstance(volumes, str):
gdf["mm_v"] = volumes
volumes = "mm_v"
self.volumes = gdf[volumes]
self.series = gdf[volumes] / (gdf[perimeters] * gdf[heights])
# Smallest enclosing circle - Library (Python)
# Copyright (c) 2017 Project Nayuki
# https://www.nayuki.io/page/smallest-enclosing-circle
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program (see COPYING.txt and COPYING.LESSER.txt).
# If not, see <http://www.gnu.org/licenses/>.
# Data conventions: A point is a pair of floats (x, y).
# A circle is a triple of floats (center x, center y, radius).
# Returns the smallest circle that encloses all the given points.
# Runs in expected O(n) time, randomized.
# Input: A sequence of pairs of floats or ints, e.g. [(0,5), (3.1,-2.7)].
# Output: A triple of floats representing a circle.
# Note: If 0 points are given, None is returned. If 1 point is given,
# a circle of radius 0 is returned.
#
# Initially: No boundary points known
def _make_circle(points):
# Convert to float and randomize order
shuffled = [(float(x), float(y)) for (x, y) in points]
random.shuffle(shuffled)
# Progressively add points to circle or recompute circle
c = None
for (i, p) in enumerate(shuffled):
if c is None or not _is_in_circle(c, p):
c = _make_circle_one_point(shuffled[: i + 1], p)
return c
# One boundary point known
def _make_circle_one_point(points, p):
c = (p[0], p[1], 0.0)
for (i, q) in enumerate(points):
if not _is_in_circle(c, q):
if c[2] == 0.0:
c = _make_diameter(p, q)
else:
c = _make_circle_two_points(points[: i + 1], p, q)
return c
# Two boundary points known
def _make_circle_two_points(points, p, q):
circ = _make_diameter(p, q)
left = None
right = None
px, py = p
qx, qy = q
# For each point not in the two-point circle
for r in points:
if _is_in_circle(circ, r):
continue
# Form a circumcircle and classify it on left or right side
cross = _cross_product(px, py, qx, qy, r[0], r[1])
c = _make_circumcircle(p, q, r)
if c is None:
continue
elif cross > 0.0 and (
left is None
or _cross_product(px, py, qx, qy, c[0], c[1])
> _cross_product(px, py, qx, qy, left[0], left[1])
):
left = c
elif cross < 0.0 and (
right is None
or _cross_product(px, py, qx, qy, c[0], c[1])
< _cross_product(px, py, qx, qy, right[0], right[1])
):
right = c
# Select which circle to return
if left is None and right is None:
return circ
if left is None:
return right
if right is None:
return left
if left[2] <= right[2]:
return left
return right
def _make_circumcircle(p0, p1, p2):
# Mathematical algorithm from Wikipedia: Circumscribed circle
ax, ay = p0
bx, by = p1
cx, cy = p2
ox = (min(ax, bx, cx) + max(ax, bx, cx)) / 2.0
oy = (min(ay, by, cy) + max(ay, by, cy)) / 2.0
ax -= ox
ay -= oy
bx -= ox
by -= oy
cx -= ox
cy -= oy
d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0
if d == 0.0:
return None
x = (
ox
+ (
(ax * ax + ay * ay) * (by - cy)
+ (bx * bx + by * by) * (cy - ay)
+ (cx * cx + cy * cy) * (ay - by)
)
/ d
)
y = (
oy
+ (
(ax * ax + ay * ay) * (cx - bx)
+ (bx * bx + by * by) * (ax - cx)
+ (cx * cx + cy * cy) * (bx - ax)
)
/ d
)
ra = math.hypot(x - p0[0], y - p0[1])
rb = math.hypot(x - p1[0], y - p1[1])
rc = math.hypot(x - p2[0], y - p2[1])
return (x, y, max(ra, rb, rc))
def _make_diameter(p0, p1):
cx = (p0[0] + p1[0]) / 2.0
cy = (p0[1] + p1[1]) / 2.0
r0 = math.hypot(cx - p0[0], cy - p0[1])
r1 = math.hypot(cx - p1[0], cy - p1[1])
return (cx, cy, max(r0, r1))
_MULTIPLICATIVE_EPSILON = 1 + 1e-14
def _is_in_circle(c, p):
return (
c is not None
and math.hypot(p[0] - c[0], p[1] - c[1]) <= c[2] * _MULTIPLICATIVE_EPSILON
)
# Returns twice the signed area of the triangle defined by (x0, y0), (x1, y1), (x2, y2).
def _cross_product(x0, y0, x1, y1, x2, y2):
return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)
# end of Nayuiki script to define the smallest enclosing circle
# calculate the area of circumcircle
def _circle_area(points):
if len(points[0]) == 3:
points = [x[:2] for x in points]
circ = _make_circle(points)
return math.pi * circ[2] ** 2
def _circle_radius(points):
if len(points[0]) == 3:
points = [x[:2] for x in points]
circ = _make_circle(points)
return circ[2]
class CircularCompactness:
"""
Calculates compactness index of each object in given GeoDataFrame.
.. math::
area \\over \\textit{area of enclosing circle}
Adapted from :cite:`dibble2017`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['comp'] = momepy.CircularCompactness(buildings_df, 'area').series
>>> buildings_df['comp'][0]
0.572145421828038
"""
def __init__(self, gdf, areas=None):
self.gdf = gdf
if areas is None:
areas = gdf.geometry.area
elif isinstance(areas, str):
areas = gdf[areas]
self.areas = areas
hull = gdf.convex_hull.exterior
radius = hull.apply(
lambda g: _circle_radius(list(g.coords)) if g is not None else None
)
self.series = areas / (np.pi * radius ** 2)
class SquareCompactness:
"""
Calculates compactness index of each object in given GeoDataFrame.
.. math::
\\begin{equation*}
\\left(\\frac{4 \\sqrt{area}}{perimeter}\\right) ^ 2
\\end{equation*}
Adapted from :cite:`feliciotti2018`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
perimeters : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored perimeter value. If set to ``None``, function will calculate perimeters
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
areas : Series
Series containing used area values
perimeters : Series
Series containing used perimeter values
Examples
--------
>>> buildings_df['squ_comp'] = momepy.SquareCompactness(buildings_df).series
>>> buildings_df['squ_comp'][0]
0.6193872538650996
"""
def __init__(self, gdf, areas=None, perimeters=None):
self.gdf = gdf
gdf = gdf.copy()
if perimeters is None:
gdf["mm_p"] = gdf.geometry.length
perimeters = "mm_p"
else:
if not isinstance(perimeters, str):
gdf["mm_p"] = perimeters
perimeters = "mm_p"
self.perimeters = gdf[perimeters]
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
self.series = ((np.sqrt(gdf[areas]) * 4) / gdf[perimeters]) ** 2
class Convexity:
"""
Calculates Convexity index of each object in given GeoDataFrame.
.. math::
area \\over \\textit{convex hull area}
Adapted from :cite:`dibble2017`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['convexity'] = momepy.Convexity(buildings_df).series
>>> buildings_df['convexity'][0]
0.8151964258521672
"""
def __init__(self, gdf, areas=None):
self.gdf = gdf
gdf = gdf.copy()
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
self.series = gdf[areas] / gdf.geometry.convex_hull.area
class CourtyardIndex:
"""
Calculates courtyard index of each object in given GeoDataFrame.
.. math::
\\textit{area of courtyards} \\over \\textit{total area}
Adapted from :cite:`schirmer2015`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
courtyard_areas : str, list, np.array, pd.Series
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value
(To calculate volume you can use :py:class:`momepy.CourtyardArea`)
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
courtyard_areas : Series
Series containing used courtyard areas values
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['courtyard_index'] = momepy.CourtyardIndex(buildings,
... 'courtyard_area',
... 'area').series
>>> buildings_df.courtyard_index[80]
0.16605915738643523
"""
def __init__(self, gdf, courtyard_areas, areas=None):
self.gdf = gdf
gdf = gdf.copy()
if not isinstance(courtyard_areas, str):
gdf["mm_ca"] = courtyard_areas
courtyard_areas = "mm_ca"
self.courtyard_areas = gdf[courtyard_areas]
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
self.series = gdf[courtyard_areas] / gdf[areas]
class Rectangularity:
"""
Calculates rectangularity of each object in given GeoDataFrame.
.. math::
{area \\over \\textit{minimum bounding rotated rectangle area}}
Adapted from :cite:`dibble2017`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['rect'] = momepy.Rectangularity(buildings_df, 'area').series
100%|██████████| 144/144 [00:00<00:00, 866.62it/s]
>>> buildings_df.rect[0]
0.6942676157646379
"""
def __init__(self, gdf, areas=None):
# TODO: vectorize minimum_rotated_rectangle after pygeos implementation
self.gdf = gdf
gdf = gdf.copy()
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
self.series = gdf.apply(
lambda row: row[areas] / (row.geometry.minimum_rotated_rectangle.area),
axis=1,
)
class ShapeIndex:
"""
Calculates shape index of each object in given GeoDataFrame.
.. math::
{\\sqrt{{area} \\over {\\pi}}} \\over {0.5 * \\textit{longest axis}}
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
longest_axis : str, list, np.array, pd.Series
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored longest axis value
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
longest_axis : Series
Series containing used longest axis values
areas : Series
Series containing used area values
Examples
--------
>>> buildings_df['shape_index'] = momepy.ShapeIndex(buildings_df,
... longest_axis='long_ax',
... areas='area').series
100%|██████████| 144/144 [00:00<00:00, 5558.33it/s]
>>> buildings_df['shape_index'][0]
0.7564029493781987
"""
def __init__(self, gdf, longest_axis, areas=None):
self.gdf = gdf
gdf = gdf.copy()
if not isinstance(longest_axis, str):
gdf["mm_la"] = longest_axis
longest_axis = "mm_la"
self.longest_axis = gdf[longest_axis]
if areas is None:
areas = gdf.geometry.area
if not isinstance(areas, str):
gdf["mm_a"] = areas
areas = "mm_a"
self.areas = gdf[areas]
self.series = pd.Series(
np.sqrt(gdf[areas] / np.pi) / (0.5 * gdf[longest_axis]), index=gdf.index
)
class Corners:
"""
Calculates number of corners of each object in given GeoDataFrame.
Uses only external shape (``shapely.geometry.exterior``), courtyards are not
included.
.. math::
\\sum corner
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
Examples
--------
>>> buildings_df['corners'] = momepy.Corners(buildings_df).series
100%|██████████| 144/144 [00:00<00:00, 1042.15it/s]
>>> buildings_df.corners[0]
24
"""
def __init__(self, gdf, verbose=True):
self.gdf = gdf
# define empty list for results
results_list = []
# calculate angle between points, return true or false if real corner
def _true_angle(a, b, c):
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
# TODO: add arg to specify these values
if np.degrees(angle) <= 170:
return True
if np.degrees(angle) >= 190:
return True
return False
# fill new column with the value of area, iterating over rows one by one
for geom in tqdm(gdf.geometry, total=gdf.shape[0], disable=not verbose):
if geom.type == "Polygon":
corners = 0 # define empty variables
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
elif geom.type == "MultiPolygon":
corners = 0 # define empty variables
for g in geom.geoms:
points = list(g.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
else:
corners = np.nan
results_list.append(corners)
self.series = pd.Series(results_list, index=gdf.index)
class Squareness:
"""
Calculates squareness of each object in given GeoDataFrame.
Uses only external shape (``shapely.geometry.exterior``), courtyards are not
included.
.. math::
\\mu=\\frac{\\sum_{i=1}^{N} d_{i}}{N}
where :math:`d` is the deviation of angle of corner :math:`i` from 90 degrees.
Adapted from :cite:`dibble2017`.
Returns ``np.nan`` for MultiPolygons.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
Examples
--------
>>> buildings_df['squareness'] = momepy.Squareness(buildings_df).series
100%|██████████| 144/144 [00:01<00:00, 129.49it/s]
>>> buildings_df.squareness[0]
3.7075816043359864
"""
def __init__(self, gdf, verbose=True):
self.gdf = gdf
# define empty list for results
results_list = []
def _angle(a, b, c):
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.degrees(np.arccos(cosine_angle))
return angle
# fill new column with the value of area, iterating over rows one by one
for geom in tqdm(gdf.geometry, total=gdf.shape[0], disable=not verbose):
if geom.type == "Polygon":
angles = []
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
ang = _angle(a, b, c)
if ang <= 175:
angles.append(ang)
elif _angle(a, b, c) >= 185:
angles.append(ang)
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
ang = _angle(a, b, c)
if _angle(a, b, c) <= 175:
angles.append(ang)
elif _angle(a, b, c) >= 185:
angles.append(ang)
else:
continue
deviations = [abs(90 - i) for i in angles]
results_list.append(np.mean(deviations))
else:
results_list.append(np.nan)
self.series = pd.Series(results_list, index=gdf.index)
class EquivalentRectangularIndex:
"""
Calculates equivalent rectangular index of each object in given GeoDataFrame.
.. math::
\\sqrt{{area} \\over \\textit{area of bounding rectangle}} *
{\\textit{perimeter of bounding rectangle} \\over {perimeter}}
Based on :cite:`basaraner2017`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
areas : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored area value. If set to ``None``, function will calculate areas
during the process without saving them separately.
perimeters : str, list, np.array, pd.Series (default None)
the name of the dataframe column, ``np.array``, or ``pd.Series`` where is
stored perimeter value. If set to ``None``, function will calculate perimeters
during the process without saving them separately.
Attributes
----------
series : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
areas : Series
Series containing used area values
perimeters : Series
Series containing used perimeter values
Examples
--------
>>> buildings_df['eri'] = momepy.EquivalentRectangularIndex(buildings_df,
... 'area',
... 'peri').series
>>> buildings_df['eri'][0]
0.7879229963118455
"""
def __init__(self, gdf, areas=None, perimeters=None):
self.gdf = gdf
# define empty list for results
if perimeters is None:
perimeters = gdf.geometry.length
else:
if isinstance(perimeters, str):
perimeters = gdf[perimeters]
self.perimeters = perimeters
if areas is None:
areas = gdf.geometry.area
else:
if isinstance(areas, str):
areas = gdf[areas]
self.areas = areas
# TODO: vectorize minimum_rotated_rectangle after pygeos implementation
bbox = gdf.geometry.apply(lambda g: g.minimum_rotated_rectangle)
res = np.sqrt(areas / bbox.area) * (bbox.length / perimeters)
self.series = pd.Series(res, index=gdf.index)
class Elongation:
"""
Calculates elongation of object seen as elongation of
its minimum bounding rectangle.
.. math::
{{p - \\sqrt{p^2 - 16a}} \\over {4}} \\over
{{{p} \\over {2}} - {{p - \\sqrt{p^2 - 16a}} \\over {4}}}
where `a` is the area of the object and `p` its perimeter.
Based on :cite:`gil2012`.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
Attributes
----------
e : Series
Series containing resulting values
gdf : GeoDataFrame
original GeoDataFrame
Examples
--------
>>> buildings_df['elongation'] = momepy.Elongation(buildings_df).series
>>> buildings_df['elongation'][0]
0.9082437463675544
"""
def __init__(self, gdf):
self.gdf = gdf
# TODO: vectorize minimum_rotated_rectangle after pygeos implementation
bbox = gdf.geometry.apply(lambda g: g.minimum_rotated_rectangle)
a = bbox.area
p = bbox.length
cond1 = p ** 2
cond2 = 16 * a
bigger = cond1 >= cond2
sqrt = np.empty(len(a))
sqrt[bigger] = cond1[bigger] - cond2[bigger]
sqrt[~bigger] = 0
# calculate both width/length and length/width
elo1 = ((p - np.sqrt(sqrt)) / 4) / ((p / 2) - ((p - np.sqrt(sqrt)) / 4))
elo2 = ((p + np.sqrt(sqrt)) / 4) / ((p / 2) - ((p + np.sqrt(sqrt)) / 4))
# use the smaller one (e.g. shorter/longer)
res = np.empty(len(a))
res[elo1 <= elo2] = elo1[elo1 <= elo2]
res[~(elo1 <= elo2)] = elo2[~(elo1 <= elo2)]
self.series = pd.Series(res, index=gdf.index)
class CentroidCorners:
"""
Calculates mean distance centroid - corners and st. deviation.
.. math::
\\overline{x}=\\frac{1}{n}\\left(\\sum_{i=1}^{n} dist_{i}\\right);
\\space \\mathrm{SD}=\\sqrt{\\frac{\\sum|x-\\overline{x}|^{2}}{n}}
Adapted from :cite:`schirmer2015` and :cite:`cimburova2017`.
Returns ``np.nan`` for MultiPolygons.
Parameters
----------
gdf : GeoDataFrame
GeoDataFrame containing objects
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Attributes
----------
mean : Series
Series containing mean distance values.
std : Series
Series containing standard deviation values.
gdf : GeoDataFrame
original GeoDataFrame
Examples
--------
>>> ccd = momepy.CentroidCorners(buildings_df)
100%|██████████| 144/144 [00:00<00:00, 846.58it/s]
>>> buildings_df['ccd_means'] = ccd.means
>>> buildings_df['ccd_stdev'] = ccd.std
>>> buildings_df['ccd_means'][0]
15.961531913184833
>>> buildings_df['ccd_stdev'][0]
3.0810634305400177
"""
def __init__(self, gdf, verbose=True):
self.gdf = gdf
# define empty list for results
results_list = []
results_list_sd = []
# calculate angle between points, return true or false if real corner
def true_angle(a, b, c):
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
if np.degrees(angle) <= 170:
return True
if np.degrees(angle) >= 190:
return True
return False
# iterating over rows one by one
for geom in tqdm(gdf.geometry, total=gdf.shape[0], disable=not verbose):
if geom.type == "Polygon":
distances = [] # set empty list of distances
centroid = geom.centroid # define centroid
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
p = Point(points[i])
if true_angle(a, b, c) is True:
distance = centroid.distance(
p
) # calculate distance point - centroid
distances.append(distance) # add distance to the list
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
p = Point(points[i])
if true_angle(a, b, c) is True:
distance = centroid.distance(p)
distances.append(distance)
else:
continue
if not distances: # circular buildings
if geom.has_z:
coords = [
(coo[0], coo[1]) for coo in geom.convex_hull.exterior.coords
]
else:
coords = geom.convex_hull.exterior.coords
results_list.append(_circle_radius(coords))
results_list_sd.append(0)
else:
results_list.append(np.mean(distances)) # calculate mean
results_list_sd.append(np.std(distances)) # calculate st.dev
else:
results_list.append(np.nan)
results_list_sd.append(np.nan)
self.mean = | pd.Series(results_list, index=gdf.index) | pandas.Series |
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import dict2MD
from brightics.common.repr import pandasDF2MD
from brightics.common.utils import check_required_parameters
from brightics.common.groupby import _function_by_group
import numpy as np
import pandas as pd
import math
from math import sqrt
from scipy.stats import t
from scipy import mean, stats
from statsmodels.stats.weightstats import ttest_ind
def one_sample_ttest(table, group_by=None, **params):
check_required_parameters(_one_sample_ttest, params, ['table'])
if group_by is not None:
return _function_by_group(_one_sample_ttest, table, group_by=group_by, **params)
else:
return _one_sample_ttest(table, **params)
def _width(col, alpha, n):
# t.ppf(1.0 - alpha, n-1) is a t-critical value
return stats.t.ppf(1.0 - alpha, n - 1) * col.std() / np.sqrt(n)
def _test_result(alter, hypothesized_mean, mean, t_value, p_value_two, width_one_sided, width_two_sided):
if alter == 'Greater':
H1 = 'true mean > {}'.format(hypothesized_mean)
if t_value >= 0:
p_value = p_value_two / 2
else:
p_value = 1 - p_value_two / 2
lower_conf_interval = mean - width_one_sided
upper_conf_interval = np.inf
if alter == 'Less':
H1 = 'true mean < {}'.format(hypothesized_mean)
if t_value >= 0:
p_value = 1 - p_value_two / 2
else:
p_value = p_value_two / 2
lower_conf_interval = -np.inf
upper_conf_interval = mean + width_one_sided
if alter == 'Two Sided':
H1 = 'true mean != {}'.format(hypothesized_mean)
p_value = p_value_two
lower_conf_interval = mean - width_two_sided
upper_conf_interval = mean + width_two_sided
return (H1, p_value, lower_conf_interval, upper_conf_interval)
def _result_table(input_cols, alternatives, result_dict):
out_cols = ['data', 'alternative_hypothesis', 'statistics', 't_value',
'p_value', 'confidence_level', 'lower_confidence_interval', 'upper_confidence_interval', 'confidence_interval']
row_dict_list = []
for input_col in input_cols:
for alter in alternatives:
row_dict = result_dict[input_col][alter]
row_dict['data'] = input_col
row_dict_list.append(row_dict)
return pd.DataFrame.from_dict(row_dict_list).reindex(columns=out_cols)
def _one_sample_ttest_repr(statistics, result_dict, params):
input_cols = params['input_cols']
alternatives = params['alternatives']
hypothesized_mean = params['hypothesized_mean']
conf_level = params['conf_level']
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| One Sample T Test Result
| - Statistics = {s}
| - Hypothesized mean = {h}
| - Confidence level = {cl}
""".format(s=statistics, h=hypothesized_mean, cl=conf_level)))
for input_col in input_cols:
H1_list = []
p_list = []
CI_list = []
for alter in alternatives:
test_info = result_dict[input_col][alter]
H1_list.append(test_info['alternative_hypothesis'])
p_list.append(test_info['p_value'])
CI_list.append(test_info['confidence_interval'])
result_table = pd.DataFrame.from_items([
['alternative hypothesis', H1_list],
['p-value', p_list],
['%g%% confidence Interval' % (conf_level * 100), CI_list]
])
rb.addMD(strip_margin("""
| ### Data = {input_col}
| - t-value = {t_value}
|
| {result_table}
""".format(input_col=input_col, t_value=result_dict[input_col]['t_value'], result_table=pandasDF2MD(result_table))))
rb.addMD(strip_margin("""
| ### Parameters
| {params}
""".format(params=dict2MD(params))))
return rb
def _one_sample_ttest(table, input_cols, alternatives, hypothesized_mean=0, conf_level=0.95):
n = len(table)
alpha = 1.0 - conf_level
statistics = "t statistic, t distribution with %d degrees of freedom under the null hypothesis." % (n - 1)
result_dict = dict()
for input_col in input_cols:
# sample mean, width of the confidence interval
col = table[input_col]
mean = np.mean(col)
width_one_sided = _width(col, alpha, n)
width_two_sided = _width(col, alpha / 2, n)
# t-statistic, two-tailed p-value
t_value, p_value_two = stats.ttest_1samp(col, hypothesized_mean)
result_dict[input_col] = dict()
result_dict[input_col]['t_value'] = t_value
for alter in alternatives:
(H1, p_value, lower_conf_interval, upper_conf_interval) = _test_result(alter, hypothesized_mean, mean, t_value, p_value_two, width_one_sided, width_two_sided)
confidence_interval = '({lower_conf_interval}, {upper_conf_interval})'.format(lower_conf_interval=lower_conf_interval, upper_conf_interval=upper_conf_interval)
result_dict[input_col][alter] = dict()
result_dict[input_col][alter]['alternative_hypothesis'] = H1
result_dict[input_col][alter]['confidence_level'] = conf_level
result_dict[input_col][alter]['statistics'] = statistics
result_dict[input_col][alter]['t_value'] = t_value
result_dict[input_col][alter]['p_value'] = p_value
result_dict[input_col][alter]['lower_confidence_interval'] = lower_conf_interval
result_dict[input_col][alter]['upper_confidence_interval'] = upper_conf_interval
result_dict[input_col][alter]['confidence_interval'] = confidence_interval
params = {
'input_cols':input_cols,
'alternatives': alternatives,
'hypothesized_mean': hypothesized_mean,
'conf_level': conf_level
}
result = _result_table(input_cols, alternatives, result_dict)
rb = _one_sample_ttest_repr(statistics, result_dict, params)
model = dict()
model['result'] = result
model['_repr_brtc_'] = rb.get()
model['params'] = params
return {'model':model}
def two_sample_ttest_for_stacked_data(table, group_by=None, **params):
check_required_parameters(_two_sample_ttest_for_stacked_data, params, ['table'])
if group_by is not None:
return _function_by_group(_two_sample_ttest_for_stacked_data, table, group_by=group_by, **params)
else:
return _two_sample_ttest_for_stacked_data(table, **params)
def _two_sample_ttest_for_stacked_data(table, response_cols, factor_col, alternatives, first=None , second=None , hypo_diff=0, equal_vari='pooled', confi_level=0.95):
if(type(table[factor_col][0]) != str):
if(type(table[factor_col][0]) == bool):
if(first != None):
first = bool(first)
if(second != None):
second = bool(second)
else:
if(first != None):
first = float(first)
if(second != None):
second = float(second)
if(first == None or second == None):
tmp_factors = []
if(first != None):
tmp_factors += [first]
if(second != None):
tmp_factors += [second]
for i in range(len(table[factor_col])):
if(table[factor_col][i] != None and table[factor_col][i] not in tmp_factors):
if(len(tmp_factors) == 2):
raise Exception("There are more that 2 factors.")
else:
tmp_factors += [table[factor_col][i]]
if(first == None):
if(tmp_factors[0] != second):
first = tmp_factors[0]
else:
first = tmp_factors[1]
if(second == None):
if(tmp_factors[0] != first):
second = tmp_factors[0]
else:
second = tmp_factors[1]
table_first = table[table[factor_col] == first]
table_second = table[table[factor_col] == second]
tmp_table = []
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
## Two Sample T Test for Stacked Data Result
| - Hypothesized mean = {hypo_diff}
| - Confidence level = {confi_level}
""".format(hypo_diff=hypo_diff, confi_level=confi_level)))
for response_col in response_cols:
tmp_model = []
number1 = len(table_first[response_col])
number2 = len(table_second[response_col])
mean1 = (table_first[response_col]).mean()
mean2 = (table_second[response_col]).mean()
std1 = (table_first[response_col]).std()
std2 = (table_second[response_col]).std()
start_auto = 0
if(equal_vari == 'auto'):
start_auto = 1
f_value = (std1 ** 2) / (std2 ** 2)
f_test_p_value_tmp = stats.f.cdf(1 / f_value, number1 - 1, number2 - 1)
if(f_test_p_value_tmp > 0.5):
f_test_p_value = (1 - f_test_p_value_tmp) * 2
else:
f_test_p_value = f_test_p_value_tmp * 2
if(f_test_p_value < 0.05):
equal_vari = 'unequal'
else:
equal_vari = 'pooled'
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'larger', usevar=equal_vari, value=hypo_diff)
if 'larger' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'larger', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level) , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level) , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means > 0.0'] +
[ttestresult[1]] + [(mean1 - mean2 - margin, math.inf)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means > 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [mean1 - mean2 - margin] + [math.inf]]
if 'smaller' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'smaller', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level) , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level) , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means < 0.0'] +
[ttestresult[1]] + [(-math.inf, mean1 - mean2 + margin)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means < 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [-math.inf] + [mean1 - mean2 + margin]]
if 'two-sided' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'two-sided', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level + 1) / 2 , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level + 1) / 2 , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means != 0.0'] +
[ttestresult[1]] + [(mean1 - mean2 - margin, mean1 - mean2 + margin)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means != 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [mean1 - mean2 - margin] + [mean1 - mean2 + margin]]
result_model = pd.DataFrame.from_records(tmp_model)
result_model.columns = ['alternative hypothesis', 'p-value', '%g%% confidence interval' % (confi_level * 100)]
rb.addMD(strip_margin("""
| #### Data = {response_col} by {factor_col}({first},{second})
| - Statistics = t statistic, t distribution with {ttestresult2} degrees of freedom under the null hypothesis
| - t-value = {ttestresult0}
|
| {result_model}
|
""".format(ttestresult2=ttestresult[2], response_col=response_col, factor_col=factor_col, first=first, second=second, ttestresult0=ttestresult[0], result_model=pandasDF2MD(result_model))))
if(start_auto == 1):
equal_vari = 'auto'
result = | pd.DataFrame.from_records(tmp_table) | pandas.DataFrame.from_records |
import ipyleaflet
import ipywidgets
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon, Point
import datetime
import requests
import xml.etree.ElementTree as ET
import calendar
import numpy as np
import pathlib
import os
import bqplot as bq
from functools import reduce
class ANA_interactive_map:
def __init__(self):
self.m01 = ipyleaflet.Map(zoom=4, center=(-16, -50), scroll_wheel_zoom=True,layout=ipywidgets.Layout(width='60%', height='500px'))
self.controls_on_Map()
self.out01 = ipywidgets.Output()
self.tabs = ipywidgets.Tab([self.tab00(), self.tab01(), self.tab02(),self.tab03(), self.tab04()], layout=ipywidgets.Layout(width='40%'))
self.tabs.set_title(0, 'Inventory ')
self.tabs.set_title(1, 'Tables')
self.tabs.set_title(2, 'Stats')
self.tabs.set_title(3, 'Download')
self.tabs.set_title(4, 'Graphs')
display(ipywidgets.VBox([ipywidgets.HBox([self.m01, self.tabs]),
self.out01]))
def controls_on_Map(self):
# pass
layer_control = ipyleaflet.LayersControl(position='topright')
self.m01.add_control(layer_control)
fullscreen_control = ipyleaflet.FullScreenControl()
self.m01.add_control(fullscreen_control)
self.draw_control = ipyleaflet.DrawControl()
self.m01.add_control(self.draw_control)
self.draw_control.observe(self._draw_testeObserve, 'last_draw')
self.draw_control.observe(self._output_stats, 'last_draw')
self.draw_control.observe(self._output_stats02, 'last_draw')
scale_control = ipyleaflet.ScaleControl(position='bottomleft')
self.m01.add_control(scale_control)
# Layer too slow to used
# marks = tuple([ipyleaflet.Marker(location=(lat, lon)) for lat, lon in self.df[['Latitude', 'Longitude']].to_numpy()])
# marker_cluster = ipyleaflet.MarkerCluster(markers=marks)
# self.m01.add_layer(marker_cluster)
def tab00(self):
with self.out01:
self.html_00_01 = ipywidgets.HTML(value="<h2>Inventory</h2><hr><p>In order to utilize the program, you need to insert a <b>Inventory File</b> or get it from the <b>ANA's API</b>.</p><p>After completed the upload of the Inventory, you can select which <b>Layers</b> to visualize by checking the <b>top-right widget</b> on the map.</p>")
self.radioButton_typeInvetario = ipywidgets.RadioButtons(options=['Select Path', 'Get from API'], value=None)
self.radioButton_typeInvetario.observe(self._radioButton_inventario, names='value')
self.text_pathInvetario = ipywidgets.Text(placeholder='Insert path of the Inventario')
self.button_pathInventario = ipywidgets.Button(description='Apply')
self.button_pathInventario.on_click(self._button_pathinventario)
self.button_showInventario = ipywidgets.Button(description='Show')
self.button_showInventario.on_click(self._button_showInventario)
self.floatProgress_loadingInventario = ipywidgets.FloatProgress(min=0, max=1, value=0, layout=ipywidgets.Layout(width='90%'))
self.floatProgress_loadingInventario.bar_style = 'info'
self.intSlider_01 = ipywidgets.IntSlider(description='Radius', min=1, max=50, value=15)
self.intSlider_01.observe(self._intSlider_radius, 'value')
widget_control01 = ipyleaflet.WidgetControl(widget=self.intSlider_01, position='bottomright')
self.m01.add_control(widget_control01)
self.selectionSlider_date01 = ipywidgets.SelectionSlider(options=pd.date_range(start='2000-01-01',end='2020-01-01', freq='M').to_numpy())
self.selectionSlider_date01.observe(self._selection_observe_01, names='value')
widget_control02 = ipyleaflet.WidgetControl(widget=self.selectionSlider_date01, position='bottomright')
self.m01.add_control(widget_control02)
self.html_00_02 = ipywidgets.HTML(value="<hr><p>Save the <b>API's</b> Inventory file:</p>")
self.text_pathSaveInventario = ipywidgets.Text(placeholder='Insert path to Save Inventario')
self.button_pathSaveInventario = ipywidgets.Button(description='Save')
self.button_pathSaveInventario.on_click(self._button_saveInventario)
self.text_pathInvetario.disabled = True
self.button_pathInventario.disabled = True
self.text_pathSaveInventario.disabled = True
self.button_pathSaveInventario.disabled = True
self.intSlider_01.disabled = True
self.selectionSlider_date01.disabled = True
return ipywidgets.VBox([self.html_00_01,
self.radioButton_typeInvetario,
ipywidgets.HBox([self.text_pathInvetario,self.button_pathInventario]),
self.button_showInventario,
self.floatProgress_loadingInventario,
self.html_00_02,
ipywidgets.HBox([self.text_pathSaveInventario, self.button_pathSaveInventario])])
def tab03(self):
self.html_03_01 = ipywidgets.HTML(value="<h2>Download</h2><hr><p>In order to <b>Download</b>, you need the <b>Inventory</b>.<p> Then, you can choose between using the <b>Watershed's Shapefile</b> or <b>Draw a Contour</b>.</p><p> Finally, you'll can choose to download <b>Rain</b> or <b>Flow</b> data.</p> <p>(*)You also, if selected <b>byDate</b> can filter the data.</p>")
self.dropdown_typeDownload = ipywidgets.Dropdown(options=['Watershed', 'All', 'byDate'], value=None, description='Select type:', layout=ipywidgets.Layout(width='90%'))
self.dropdown_typeDownload.observe(self._dropdown_observe_01, names='value')
self.dropdown_typeDownload.observe(self._shapefile_buttom, names='value')
self.text_pathShapefle = ipywidgets.Text(placeholder='Insert Shapefile PATH HERE')
self.button_ViewShapefile = ipywidgets.Button(description='View', layout=ipywidgets.Layout(width='30%'))
self.button_ViewShapefile.on_click(self._shapefile_buttom)
self.text_pathShapefle.disabled = True
self.button_ViewShapefile.disabled = True
self.checkbox_downloadIndividual = ipywidgets.Checkbox(description='Individual Files', value=True)
self.checkbox_downloadGrouped = ipywidgets.Checkbox(description='Grouped Files')
self.text_pathDownload = ipywidgets.Text(placeholder='Write your PATH to Download HERE.')
self.button_download = ipywidgets.Button(description='Download', layout=ipywidgets.Layout(width='30%'))
self.button_download.on_click(self._download_button01)
self.floatProgress_loadingDownload = ipywidgets.FloatProgress(min=0, max=1, value=0, layout=ipywidgets.Layout(width='90%'))
self.radioButton_typeDownload = ipywidgets.RadioButtons(options=['Rain', 'Flow'], layout=ipywidgets.Layout())
return ipywidgets.VBox([ipywidgets.VBox([self.html_03_01,
self.dropdown_typeDownload,
ipywidgets.HBox([self.text_pathShapefle,self.button_ViewShapefile])]),
self.radioButton_typeDownload,
ipywidgets.HBox([self.checkbox_downloadIndividual, self.checkbox_downloadGrouped]),
ipywidgets.HBox([self.text_pathDownload, self.button_download]),
self.floatProgress_loadingDownload])
def tab01(self):
self.out02 = ipywidgets.Output()
with self.out02:
self.html_01_01 = ipywidgets.HTML(value="<h2>Inventory</h2><hr><p>This tab is for the visualization of the <b>Inventory's</b> data table.</p>")
self.dropdown_typeView = ipywidgets.Dropdown(options=['Watershed', 'All', 'byDate'], value=None, description='Select type:', layout=ipywidgets.Layout(width='90%'))
self.dropdown_typeView.observe(self._dropdown_oberve_01_02, 'value')
self.dropdown_typeView.observe(self._selectionMultiple_column, 'value')
self.dropdown_typeView.observe(self._dropdown_observe_02, 'value')
self.text_pathShapefile_02 = ipywidgets.Text(placeholder='Insert Shapefile')
self.button_ViewShapefile_02 = ipywidgets.Button(description='View')
self.button_ViewShapefile_02.on_click(self._shapefile_buttom_02)
self.button_ViewShapefile_02.on_click(self._shapefile_buttom_03)
self.selectionMultiple_df_01 = ipywidgets.SelectMultiple(description='Columns:')
self.selectionMultiple_df_01.observe(self._selectionMultiple_column, 'value')
self.selectionSlider_date02 = ipywidgets.SelectionSlider(options=pd.date_range(start='2000-01-01',end='2020-01-01', freq='M').to_numpy(),layout=ipywidgets.Layout(width='90%'))
self.selectionSlider_date02.observe(self._selection_observe_02, 'value')
self.html_01_02 = ipywidgets.HTML(value="<hr><p>Save the <b>Table</b> below:</p>")
self.text_pathSaveInventarioDF = ipywidgets.Text(placeholder='Insert Path to Save')
self.button_pathSaveInventarioDF = ipywidgets.Button(description='Save')
self.button_pathSaveInventarioDF.on_click(self._button_saveInventarioDF)
self.text_pathShapefile_02.disabled = True
self.button_ViewShapefile_02.disabled = True
self.text_pathSaveInventarioDF.disabled = True
self.button_pathSaveInventarioDF.disabled = True
return ipywidgets.VBox([self.html_01_01,
self.dropdown_typeView,
ipywidgets.HBox([self.text_pathShapefile_02, self.button_ViewShapefile_02]),
self.selectionMultiple_df_01,
self.selectionSlider_date02,
self.html_01_02,
ipywidgets.HBox([self.text_pathSaveInventarioDF, self.button_pathSaveInventarioDF]),
self.out02])
def tab02(self):
self.out03 = ipywidgets.Output()
self.out03_02 = ipywidgets.Output()
self.html_02_01 = ipywidgets.HTML(value="<h2>Inventory</h2><hr><p>This tab is for the visualization of the <b>Inventory's</b> basic stats.</p>")
self.html_02_02 = ipywidgets.HTML()
self.accordion01 = ipywidgets.Accordion([self.out03_02])
self.accordion01.set_title(0, 'More stats')
self.accordion01.selected_index = None
return ipywidgets.VBox([self.html_02_01,
self.dropdown_typeView,
ipywidgets.HBox([self.text_pathShapefile_02, self.button_ViewShapefile_02]),
self.selectionSlider_date02,
self.html_02_02,
self.out03,
self.accordion01])
def tab04(self):
self.html_teste = ipywidgets.HTML(value="<h2>Download</h2><hr><p>In this tab, <b>after completed the Download</b> you can visualize the Date Periods of your data. But first, you'll need to select <b>2 or more columns</b>.</p><p><b>Red</b> means no data in the month.</p><p><b>Blue</b> means at least one day with data in the month.</p>")
self.out04 = ipywidgets.Output()
with self.out04:
# self.x_scale_01 = bq.DateScale()
# self.y_scale_01 = bq.LinearScale()
self.x_scale_hm_01 = bq.OrdinalScale()
self.y_scale_hm_01 = bq.OrdinalScale()
self.c_scale_hm_01 = bq.ColorScale(scheme='RdYlBu')
self.x_axis_01 = bq.Axis(scale=self.x_scale_hm_01,tick_rotate=270,tick_style={'font-size': 12}, num_ticks=5)
self.y_axis_01 = bq.Axis(scale=self.y_scale_hm_01, orientation='vertical',tick_style={'font-size': 10})
self.c_axis_01 = bq.ColorAxis(scale=self.c_scale_hm_01)
self.fig_01 = bq.Figure(axes=[self.x_axis_01, self.y_axis_01],fig_margin={'top':40,'bottom':40,'left':40,'right':40})
# self.dropdown_xAxis_01 = ipywidgets.Dropdown(description='X-Axis')
# self.dropdown_yAxis_01 = ipywidgets.Dropdown(description='Y-Axis')
# self.dropdown_xAxis_01.observe(self._dropdown_observe_axis, 'value')
# self.dropdown_yAxis_01.observe(self._dropdown_observe_axis, 'value')
self.selectionMultiple_column = ipywidgets.SelectMultiple(description='Columns')
self.selectionMultiple_column.observe(self._selectionMultiple_observe_column, 'value')
self.button_datePeriod = ipywidgets.Button(description='Plot')
self.button_datePeriod.on_click(self._button_datePeriod)
return ipywidgets.VBox([self.html_teste,
self.selectionMultiple_column,
self.button_datePeriod,
self.fig_01,
self.out04])
def _api_inventario(self):
api_inventario = 'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroInventario'
params = {'codEstDE':'','codEstATE':'','tpEst':'','nmEst':'','nmRio':'','codSubBacia':'',
'codBacia':'','nmMunicipio':'','nmEstado':'','sgResp':'','sgOper':'','telemetrica':''}
response = requests.get(api_inventario, params)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
invent_data = {'BaciaCodigo':[],'SubBaciaCodigo':[],'RioCodigo':[],'RioNome':[],'EstadoCodigo':[],
'nmEstado':[],'MunicipioCodigo':[],'nmMunicipio':[],'ResponsavelCodigo':[],
'ResponsavelSigla':[],'ResponsavelUnidade':[],'ResponsavelJurisdicao':[],
'OperadoraCodigo':[],'OperadoraSigla':[],'OperadoraUnidade':[],'OperadoraSubUnidade':[],
'TipoEstacao':[],'Codigo':[],'Nome':[],'CodigoAdicional':[],'Latitude':[],'Longitude':[],
'Altitude':[],'AreaDrenagem':[],'TipoEstacaoEscala':[],'TipoEstacaoRegistradorNivel':[],
'TipoEstacaoDescLiquida':[],'TipoEstacaoSedimentos':[],'TipoEstacaoQualAgua':[],
'TipoEstacaoPluviometro':[],'TipoEstacaoRegistradorChuva':[],'TipoEstacaoTanqueEvapo':[],
'TipoEstacaoClimatologica':[],'TipoEstacaoPiezometria':[],'TipoEstacaoTelemetrica':[],'PeriodoEscalaInicio':[],'PeriodoEscalaFim':[] ,
'PeriodoRegistradorNivelInicio' :[],'PeriodoRegistradorNivelFim' :[],'PeriodoDescLiquidaInicio' :[],'PeriodoDescLiquidaFim':[] ,'PeriodoSedimentosInicio' :[],
'PeriodoSedimentosFim':[] ,'PeriodoQualAguaInicio':[] ,'PeriodoQualAguaFim' :[],'PeriodoPluviometroInicio':[] ,'PeriodoPluviometroFim':[] ,
'PeriodoRegistradorChuvaInicio' :[],'PeriodoRegistradorChuvaFim' :[],'PeriodoTanqueEvapoInicio':[] ,'PeriodoTanqueEvapoFim':[] ,'PeriodoClimatologicaInicio' :[],'PeriodoClimatologicaFim':[] ,
'PeriodoPiezometriaInicio':[] ,'PeriodoPiezometriaFim' :[],'PeriodoTelemetricaInicio' :[],'PeriodoTelemetricaFim' :[],
'TipoRedeBasica' :[],'TipoRedeEnergetica' :[],'TipoRedeNavegacao' :[],'TipoRedeCursoDagua' :[],
'TipoRedeEstrategica':[] ,'TipoRedeCaptacao':[] ,'TipoRedeSedimentos':[] ,'TipoRedeQualAgua':[] ,
'TipoRedeClasseVazao':[] ,'UltimaAtualizacao':[] ,'Operando':[] ,'Descricao':[] ,'NumImagens':[] ,'DataIns':[] ,'DataAlt':[]}
total = len(list(root.iter('Table')))
for i in root.iter('Table'):
for j in invent_data.keys():
d = i.find('{}'.format(j)).text
if j == 'Codigo':
invent_data['{}'.format(j)].append('{:08}'.format(int(d)))
else:
invent_data['{}'.format(j)].append(d)
self.floatProgress_loadingInventario.value += 1/(total)
self.df = pd.DataFrame(invent_data)
self.text_pathSaveInventario.disabled = False
self.button_pathSaveInventario.disabled = False
def download_ANA_stations(self, list_codes, typeData, folder_toDownload):
numberOfcodes = len(list_codes)
count = 0
path_folder = pathlib.Path(folder_toDownload)
self.floatProgress_loadingDownload.bar_style = 'info'
self.dfs_download = []
for station in list_codes:
params = {'codEstacao': station, 'dataInicio': '', 'dataFim': '', 'tipoDados': '{}'.format(typeData), 'nivelConsistencia': ''}
response = requests.get('http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica', params)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
list_data = []
list_consistenciaF = []
list_month_dates = []
for i in root.iter('SerieHistorica'):
codigo = i.find("EstacaoCodigo").text
consistencia = i.find("NivelConsistencia").text
date = i.find("DataHora").text
date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
last_day = calendar.monthrange(date.year, date.month)[1]
month_dates = [date + datetime.timedelta(days=i) for i in range(last_day)]
data = []
list_consistencia = []
for day in range(last_day):
if params['tipoDados'] == '3':
value = 'Vazao{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(int(consistencia))
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(int(consistencia))
except AttributeError:
data.append(None)
list_consistencia.append(int(consistencia))
if params['tipoDados'] == '2':
value = 'Chuva{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(consistencia)
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(consistencia)
except AttributeError:
data.append(None)
list_consistencia.append(consistencia)
list_data = list_data + data
list_consistenciaF = list_consistenciaF + list_consistencia
list_month_dates = list_month_dates + month_dates
if len(list_data) > 0:
df = pd.DataFrame({'Date': list_month_dates, 'Consistence_{}_{}'.format(typeData,station): list_consistenciaF, 'Data{}_{}'.format(typeData,station): list_data})
if self.checkbox_downloadIndividual.value == True:
filename = '{}_{}.csv'.format(typeData, station)
df.to_csv(path_folder / filename)
else:
pass
count += 1
self.floatProgress_loadingDownload.value = float(count+1)/numberOfcodes
self.dfs_download.append(df)
else:
count += 1
self.floatProgress_loadingDownload.value = float(count+1)/numberOfcodes
try:
self.dfs_merge_teste0 = reduce(lambda left,right: pd.merge(left, right, on=['Date'], how='outer'), self.dfs_download)
if self.checkbox_downloadGrouped.value == True:
self.dfs_merge_teste0.to_csv(path_folder/'GroupedData_{}.csv'.format(typeData))
else:
pass
self.selectionMultiple_column.options = list(filter(lambda i: 'Data' in i, self.dfs_merge_teste0.columns.to_list()))
except:
pass
self.floatProgress_loadingDownload.bar_style = 'success'
# pass
def _radioButton_inventario(self, *args):
with self.out01:
self.floatProgress_loadingInventario.value = 0
# print(self.radioButton_typeInvetario.value)
if self.radioButton_typeInvetario.value == 'Select Path':
self.text_pathInvetario.disabled = False
self.button_pathInventario.disabled = False
else:
self.text_pathInvetario.disabled = True
self.button_pathInventario.disabled = True
# self._api_inventario()
# self.floatProgress_loadingInventario.bar_style = 'success'
def _button_pathinventario(self, *args):
self.path_inventario = self.text_pathInvetario.value
def _button_showInventario(self, *args):
with self.out01:
self.floatProgress_loadingInventario.bar_style = 'info'
if self.radioButton_typeInvetario.value == 'Select Path':
self.floatProgress_loadingInventario.value = 0
try:
self.df = | pd.read_csv(self.path_inventario, engine='python', sep='\t', delimiter=';', parse_dates=['UltimaAtualizacao'],low_memory=False) | pandas.read_csv |
"""
Generates rules by optimising the thresholds of each feature individually, then
combining them.
"""
import pandas as pd
import numpy as np
import math
from itertools import combinations
from iguanas.correlation_reduction.agglomerative_clustering_reducer import AgglomerativeClusteringReducer
import iguanas.utils as utils
from iguanas.rule_application import RuleApplier
from iguanas.rule_generation._base_generator import _BaseGenerator
from iguanas.metrics.pairwise import CosineSimilarity
from iguanas.metrics.classification import FScore, Precision
from iguanas.utils.types import PandasDataFrame, PandasSeries
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
from typing import Callable, List, Tuple, Dict
import warnings
f1 = FScore(1)
class RuleGeneratorOpt(_BaseGenerator):
"""
Generate rules by optimising the thresholds of single features and
combining these one condition rules with AND conditions to create more
complex rules.
Parameters
----------
metric : Callable
A function/method which calculates the desired performance metric
(e.g. Fbeta score). Note that the module will assume higher values
correspond to better performing rules.
n_total_conditions : int
The maximum number of conditions per generated rule.
num_rules_keep : int
The top number of rules (by Fbeta score) to keep at the end of each
stage of rule combination. Reducing this number will improve the
runtime, but may result in useful rules being removed.
n_points : int, optional
Number of points to split a numeric feature's range into when
generating the numeric one condition rules. A larger number will result
in better optimised one condition rules, but will take longer to
calculate. Defaults to 10.
ratio_window : int, optional
Factor which determines the optimisation range for numeric features
(e.g. if a numeric field has range of 1 to 11 and ratio_window = 3, the
optimisation range for the <= operator will be from 1 to (11-1)/3 =
3.33; the optimisation range for the >= operator will be from
11-((11-1)/3)=7.67 to 11). A larger number (greater than 1) will result
in a smaller range being used for optimisation of one condition rules;
set to 1 if you want to optimise the one condition rules across the
full range of the numeric feature. Defaults to 2.
one_cond_rule_opt_metric : Callable, optional
The method/function used for optimising the one condition rules. Note
that the module will assume higher values correspond to better
performing rules. Defaults to the method used for calculating the F1
score.
remove_corr_rules : bool, optional
Dictates whether correlated rules should be removed at the end of each
pairwise combination. Defaults to True.
target_feat_corr_types : Union[Dict[str, List[str]], str], optional
Limits the conditions of the rules based on the target-feature
correlation (e.g. if a feature has a positive correlation with respect
to the target, then only greater than operators are used for conditions
that utilise that feature). Can be either a dictionary specifying the
list of positively correlated features wrt the target (under the key
`PositiveCorr`) and negatively correlated features wrt the target
(under the key `NegativeCorr`), or 'Infer' (where each target-feature
correlation type is inferred from the data). Defaults to None.
verbose : int, optional
Controls the verbosity - the higher, the more messages. >0 : gives the
progress of the training of the rules. Defaults to 0.
rule_name_prefix : str, optional
Prefix to use for each rule. Defaults to 'RGO_Rule'.
Attributes
----------
rule_strings : Dict[str, str]
The generated rules, defined using the standard Iguanas string
format (values) and their names (keys).
rule_lambdas : Dict[str, object]
The generated rules, defined using the standard Iguanas lambda
expression format (values) and their names (keys).
lambda_kwargs : Dict[str, object]
The keyword arguments for the generated rules defined using the
standard Iguanas lambda expression format.
rules : Rules
The Rules object containing the generated rules.
rule_names : List[str]
The names of the generated rules.
"""
def __init__(self, metric: Callable,
n_total_conditions: int, num_rules_keep: int, n_points=10,
ratio_window=2, one_cond_rule_opt_metric=f1.fit,
remove_corr_rules=True, target_feat_corr_types=None,
verbose=0, rule_name_prefix='RGO_Rule'):
_BaseGenerator.__init__(
self,
metric=metric,
target_feat_corr_types=target_feat_corr_types,
rule_name_prefix=rule_name_prefix,
)
self.n_total_conditions = n_total_conditions
self.num_rules_keep = num_rules_keep
self.n_points = n_points
self.ratio_window = ratio_window
self.one_cond_rule_opt_metric = one_cond_rule_opt_metric
self.remove_corr_rules = remove_corr_rules
self.verbose = verbose
self.rule_strings = {}
self.rule_names = []
def __repr__(self):
if self.rule_strings:
return f'RuleGeneratorOpt object with {len(self.rule_strings)} rules generated'
else:
return f'RuleGeneratorOpt(metric={self.metric}, n_total_conditions={self.n_total_conditions}, num_rules_keep={self.num_rules_keep}, n_points={self.n_points}, ratio_window={self.ratio_window}, one_cond_rule_opt_metric={self.one_cond_rule_opt_metric}, remove_corr_rules={self.remove_corr_rules}, target_feat_corr_types={self.target_feat_corr_types})'
def fit(self, X: PandasDataFrameType, y: PandasSeriesType,
sample_weight=None) -> PandasDataFrameType:
"""
Generate rules by optimising the thresholds of single features and
combining these one condition rules with AND conditions to create more
complex rules.
Parameters
----------
X : PandasDataFrameType
The feature set used for training the model.
y : PandasSeriesType
The target column.
sample_weight : PandasSeriesType, optional
Record-wise weights to apply. Defaults to None.
Returns
-------
PandasDataFrameType
The binary columns of the generated rules.
"""
utils.check_allowed_types(X, 'X', [PandasDataFrame])
utils.check_allowed_types(y, 'y', [PandasSeries])
if sample_weight is not None:
utils.check_allowed_types(
sample_weight, 'sample_weight', [PandasSeries])
# Ensures rule names are the same when fit run without reinstantiating
self._rule_name_counter = 0
if self.target_feat_corr_types == 'Infer':
self.target_feat_corr_types = self._calc_target_ratio_wrt_features(
X=X, y=y
)
X_rules = pd.DataFrame()
rule_strings = {}
columns_int, columns_cat, columns_float = utils.return_columns_types(
X)
columns_num = [
col for col in columns_int if col not in columns_cat] + columns_float
if columns_num:
if self.verbose > 0:
print(
'--- Generating one condition rules for numeric features ---')
rule_strings_num, X_rules_num = self._generate_numeric_one_condition_rules(
X, y, columns_num, columns_int, sample_weight
)
X_rules = pd.concat([X_rules, X_rules_num], axis=1)
rule_strings.update(rule_strings_num)
if columns_cat:
if self.verbose > 0:
print(
'--- Generating one condition rules for OHE categorical features ---')
rule_strings_cat, X_rules_cat = self._generate_categorical_one_condition_rules(
X, y, columns_cat, sample_weight
)
X_rules = | pd.concat([X_rules, X_rules_cat], axis=1) | pandas.concat |
# Genetic algorithms for the Hypergraph platform.
import numpy as np
from . import graph as hgg
from . import tweaks
from . import optimizer as opt
import itertools
import time
from datetime import datetime
import pandas as pd
class GeneticOperators:
"""
Basic routines for genetic algorithms. This class contains a phenotype composed by a dictionary
of <key>:<distribution> pairs.
"""
def __init__(self, graph: [hgg.Graph, dict]):
"""
Init the genetic basic routines by getting a phenotype from either a graph or a dictionary.
:param graph: The graph used to initialize the phenotype.
"""
self.phenotype = {}
if graph is not None:
self.init_phenotype(graph)
def init_phenotype(self, graph_or_config_ranges: [hgg.Graph, dict]):
self.phenotype = hgg.Graph.copy_tweaks_config(graph_or_config_ranges)
@staticmethod
def _sample_distr_tweak(d):
if isinstance(d, tweaks.Distribution):
return d.sample()
return d
def create_population(self, size=None) -> list:
"""
Create and return a population of individuals.
:param size: The number of individuals in the population
:return:
"""
sample_f = lambda: dict([(k, self._sample_distr_tweak(d)) for k, d in self.phenotype.items()])
if size is None:
return sample_f()
return [sample_f() for _ in range(size)]
def _select_genes_from_parents(self, parents, selectors):
return dict([(gene_key, parents[idx][gene_key]) for idx, gene_key in zip(selectors, self.phenotype.keys())])
def crossover_uniform_multi_parents(self, parents) -> dict:
"""
Given a number of individuals (considered parents), return a new individual which is the result of the
crossover between the parents' genes.
:param parents:
:return: The created individual
"""
if len(parents) < 2:
raise ValueError("At least two parents are necessary to crossover")
return self._select_genes_from_parents(parents,
np.random.randint(low=0, high=len(parents), size=len(self.phenotype)))
def crossover_uniform(self, parents):
"""
Given two parents, return two new individuals. These are the result of the
crossover between the parents' genes.
:param parents:
:return: A list with two individuals
"""
if len(parents) != 2:
raise ValueError("Two parents are necessary to crossover")
phe = self.phenotype
selectors = np.random.randint(low=0, high=2, size=len(phe))
f = self._select_genes_from_parents
return [f(parents, selectors), f(parents, -selectors+1)]
@staticmethod
def _select_genes_by_name(source, keys):
return [(k, source[k]) for k in keys]
def crossover_1point(self, parents):
"""
Given two parents, return two new individuals. These are the result of the 1-point
crossover between the parents' genes.
:param parents:
:return: A list with two individuals
"""
# TODO k-point cross over
keys = list(self.phenotype.keys())
assert len(keys) > 2
k = np.random.randint(low=1, high=len(keys)-1)
keys = (keys[:k], keys[k:])
f = self._select_genes_by_name
return [
dict(f(parents[0], keys[0]) + f(parents[1], keys[1])),
dict(f(parents[1], keys[0]) + f(parents[0], keys[1]))
]
def mutations(self, individual, prob):
"""
Apply mutations to the provided individual. Every gene has the same probability of being mutated.
:param individual: The individual as instance of class Individual or a dictionary containing its tweaks
:param prob: Gene mutation probability. If this parameter is a callable then it has the form
func(keys, size=None) and it is used to specify a custom probability for each gene.
:return: The mutated individual as a dict of tweaks
"""
# *** groups_prob removed ***
# if callable(prob) and groups_prob is not None:
# raise ValueError('callable prob and groups probabilities are mutual exclusive')
phe = self.phenotype
# def get_custom_prob(key_):
# g = phe[key_].group
# return prob if g is None else groups_prob.get(g, prob)
if isinstance(individual, opt.Individual):
individual = individual.gene
individual = dict(individual)
def is_distr_not_aggr(item):
if isinstance(item[1], tweaks.Aggregation):
return False
return isinstance(item[1], tweaks.Distribution)
# select items with distributions
# gene_keys = filter(lambda it: isinstance(it[1], tweaks.Distribution), phe.items())
gene_keys = filter(is_distr_not_aggr, phe.items())
gene_keys = map(lambda it: it[0], gene_keys)
gene_keys = np.array(list(gene_keys))
if callable(prob):
# prob is callable, so we get specific probabilities by key
prob = prob(gene_keys)
probs = prob
# if groups_prob is None:
# probs = prob
# else:
# probs = list(map(get_custom_prob, gene_keys))
selection = np.where(np.random.uniform(size=len(gene_keys)) < probs)
gene_keys = gene_keys[selection]
for key in gene_keys:
individual[key] = phe[key].sample()
# *** special handling for tweaks of type Aggregation ***
gene_keys = filter(lambda it: isinstance(it[1], tweaks.Aggregation), phe.items())
gene_keys = map(lambda it: it[0], gene_keys)
gene_keys = np.array(list(gene_keys))
probs = itertools.repeat(prob)
# if groups_prob is None:
# probs = itertools.repeat(prob)
# else:
# probs = list(map(get_custom_prob, gene_keys))
for key, p in zip(gene_keys, probs):
aggr = phe[key]
if callable(p):
p = p((key, ), size=aggr.size)[0]
individual[key] = aggr.mutation(current_value=individual[key], prob=p)
return individual
def mdesm_mutation(self, individuals, mf_range=(0.1, 0.8)):
"""
MICRO-DIFFERENTIAL EVOLUTION USING VECTORIZED RANDOM MUTATION FACTOR, mutation
:param individuals: A list containing three dictionaries corresponding to the configurations of
three individuals.
:param mf_range: Mutation factor range.
:return:
"""
if len(individuals) != 3:
raise ValueError('Three individuals needed')
phe = self.phenotype
types = map(type, phe.values())
if tweaks.Aggregation in types:
raise ValueError('Aggregation tweaks not supported by mdesm_mutation')
# TODO support aggregations!
if not all(map(lambda t: isinstance(t, (tweaks.Uniform, tweaks.Normal)), types)):
raise ValueError('Some tweak type is not supported by this mutation')
individuals = | pd.DataFrame.from_dict(individuals) | pandas.DataFrame.from_dict |
#! python3
# -*- coding: utf-8 -*-
"""
XML Converter - Converts XML file to Excel data sheet based on tags required
"""
import os
import sys
import time
import logging
import openpyxl
import subprocess
import pandas as pd
from datetime import datetime
import xml.etree.ElementTree as ET
if xml_filename.endswith(('.xml', '.XML', '.txt', '.TXT')):
if os.path.exists(xml_filename):
print("XML file selection success")
os.chdir(os.path.dirname(xml_filename))
xl_path = "Export-" + datetime.now().strftime('%m%d%y-%H%M%S%f') + ".xlsx"
try:
Prog_start_time = time.time()
status = ProcessXML(xml_filename)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
lineNo = str(exc_tb.tb_lineno)
print('Error: %s : %s at Line %s.' % (type(e), e, lineNo))
Prog_run_time = (time.time() - Prog_start_time)
Prog_run_time = "{0:.2f}".format(Prog_run_time)
print("Script run time = %s seconds" % Prog_run_time)
def ParseXml(XmlFile):
'''
Parse any XML File or XML content and get the output as a list of
(ListOfTags, ListOfValues, ListOfAttribs)
Tags will only have the sub child Tag names and not the parent names.
Attributes will be list of dictionary items.
'''
try:
if os.path.exists(XmlFile):
tree = ET.parse(XmlFile)
root = tree.getroot()
logging.inf('Trying to Parse %s' % XmlFile)
else:
root = ET.fromstring(XmlFile)
# Use the below to print & see if XML is actually loaded
# print(etree.tostring(tree, pretty_print=True))
ListOfTags, ListOfValues, ListOfAttribs = [], [], []
for elem in root.iter('*'):
Tag = elem.tag
if ('}' in Tag):
Tag = Tag.split('}')[1]
ListOfTags.append(Tag)
value = elem.text
if value is not None:
ListOfValues.append(value)
else:
ListOfValues.append('')
attrib = elem.attrib
if attrib:
ListOfAttribs.append([attrib])
else:
ListOfAttribs.append([])
# print(ListOfTags, ListOfValues, ListOfAttribs)
print('XML File content parsed successfully')
print('XML File content parsed successfully')
return (ListOfTags, ListOfValues, ListOfAttribs)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
lineNo = str(exc_tb.tb_lineno)
print('Error while parsing XMLs : %s : %s at Line %s.' % (type(e), e, lineNo))
return ([], [], [])
def GetAttributeValues(ListOfTags, ListOfValues, ListOfAttribs):
try:
ListOfAttrs = []
for index, Attributes in enumerate(ListOfAttribs):
if Attributes:
if type(Attributes) == list:
Dictionary = Attributes[0]
elif type(Attributes) == dict:
Dictionary = Attributes
ListOfAtriValues = []
for index, Attributes in enumerate(ListOfAttribs):
if Attributes:
if type(Attributes) == list:
Dictionary = Attributes[0]
elif type(Attributes) == dict:
Dictionary = Attributes
DictLength = len(Dictionary)
if DictLength == 1:
for key, value in Dictionary.items():
# To separate the remaining Keys and Values into two columns
ListOfAttrs.append(key)
ListOfAtriValues.append([value])
elif DictLength > 1:
for key, value in Dictionary.items():
# To separate the remaining Keys and Values into two columns
ListOfAttrs.append(key)
ListOfAtriValues.append([value])
for i in range(len(Dictionary) - 1):
ListOfTags[index:index] = [ListOfTags[index]]
ListOfValues[index:index] = [ListOfValues[index]]
else:
ListOfAtriValues.append([])
ListOfAttrs.append('')
print('Attributes and values successfully extracted.')
print('Attributes and values successfully extracted.')
while len(ListOfAttrs) < len(ListOfTags):
ListOfAttrs.append('')
while len(ListOfAtriValues) < len(ListOfValues):
ListOfAtriValues.append([])
return ListOfTags, ListOfValues, ListOfAttrs, ListOfAtriValues
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
lineNo = str(exc_tb.tb_lineno)
print('Error while extracting Attribute values : %s : %s at Line %s.' % (type(e), e, lineNo))
return []
def exportExcel(dictionary, columnsList, xl_path):
df = pd.DataFrame.from_dict(dictionary, orient='columns')[columnsList]
# convert to excel file
writer = pd.ExcelWriter(xl_path, engine='xlsxwriter')
df.to_excel(writer, index=False)
writer.save()
writer.close()
print('Excel Exported successfully at %s' % xl_path)
def makeExcel(ListOfTags, ListOfValues, ListOfAttribs, ListOfAtriValues):
xl_path = "Export-" + datetime.now().strftime('%m%d%y%H%M%S%f') + ".xlsx"
# Make each column separately
df1 = pd.DataFrame(ListOfTags, columns=['XML Tag Names'])
df2 = pd.DataFrame(ListOfValues, columns=['XML Tag Values'])
df3 = pd.DataFrame(ListOfAttribs, columns=['Attribute Names'])
df4 = | pd.DataFrame(ListOfAtriValues, columns=['Attribute Values']) | pandas.DataFrame |
"""
sbu.dataframe
=============
A module which handles data parsing and DataFrame construction.
Index
-----
.. currentmodule:: sbu.dataframe
.. autosummary::
get_sbu
parse_accuse
get_date_range
construct_filename
_get_datetimeindex
_parse_date
_get_total_sbu_requested
API
---
.. autofunction:: get_sbu
.. autofunction:: parse_accuse
.. autofunction:: get_date_range
.. autofunction:: construct_filename
.. autofunction:: _get_datetimeindex
.. autofunction:: _parse_date
.. autofunction:: _get_total_sbu_requested
"""
import re
from subprocess import check_output
from datetime import date
from typing import Tuple, Optional, Union
import numpy as np
import pandas as pd
from sbu.globvar import ACTIVE, PROJECT, SBU_REQUESTED
__all__ = [
'get_date_range', 'construct_filename', 'get_sbu', 'parse_accuse'
]
def get_sbu(
df: pd.DataFrame,
project: str,
start: Union[None, str, int] = None,
end: Union[None, str, int] = None,
) -> None:
"""Acquire the SBU usage for each account in the :attr:`pandas.DataFrame.index`.
The start and end of the reported interval can, optionally, be altered with **start**
and **end**.
Performs an inplace update of **df**, adding new columns to hold the SBU usage per month under
the ``"Month'`` super-column.
In addition, a single row and column is added (``"sum"``) with SBU usage summed over
the entire interval and over all users, respectively.
Parameters
----------
df : :class:`pandas.DataFrame`
A Pandas DataFrame with usernames and information, constructed by :func:`yaml_to_pandas`.
:attr:`pandas.DataFrame.columns` and :attr:`pandas.DataFrame.index` should
be instances of :class:`pandas.MultiIndex` and :class:`pandas.Index`, respectively.
User accounts are expected to be stored in :attr:`pandas.DataFrame.index`.
SBU usage (including the sum) is stored in the ``"Month"`` super-column.
start : :class:`int` or :class:`str`, optional
Optional: The starting year of the interval.
Defaults to the current year if ``None``.
end : :class:`str` or :class:`int`, optional
Optional: The final year of the interval.
Defaults to current year + 1 if ``None``.
project : :class:`str`, optional
Optional: The project code of the project of interest.
If not ``None``, only SBUs expended under this project are considered.
"""
# Construct new columns in **df**
sy, ey = get_date_range(start, end)
date_range = _get_datetimeindex(sy, ey)
for i in date_range:
df[('Month', str(i)[:7])] = np.nan
df_tmp = parse_accuse(project, sy, ey)
df.update(df_tmp)
# Calculate SBU sums
SUM = ('Month', 'sum')
df[SUM] = df['Month'].sum(axis=1)
df.loc['sum'] = np.nan
df.loc['sum', 'Month'] = df['Month'].sum(axis=0).values
df.at['sum', PROJECT] = 'sum'
df.at['sum', SBU_REQUESTED] = _get_total_sbu_requested(df)
# Mark all active users
df[ACTIVE] = False
df.loc[df[SUM] > 1.0, ACTIVE] = True
DATE_PATTERN = re.compile("([0-9]+)-([0-9][0-9])-?([0-9][0-9])?")
def parse_accuse(project: str, start: Optional[str] = None, end: Optional[str] = None) -> pd.DataFrame:
"""Gather SBU usage of a specific user account.
The bash command ``accuse`` is used for gathering SBU usage along an interval defined
by **start** and **end**.
Results are collected and returned in a Pandas DataFrame.
Parameters
----------
project : :class:`str`
The project code of the project of interest.
start : :class:`str`
The starting date of the interval.
Accepts dates formatted as YYYY, MM-YYYY or DD-MM-YYYY.
end : :class:`str`
The final date of the interval.
Accepts dates formatted as YYYY, MM-YYYY or DD-MM-YYYY.
Returns
-------
:class:`pandas.DataFrame`
The SBU usage of **user** over a specified period.
"""
# Acquire SBU usage
arg = ['accuse', '-a', project]
if start is not None:
arg += ["-s", start]
if end is not None:
arg += ["-e", end]
usage = check_output(arg).decode('utf-8')
usage_list = []
for i in usage.splitlines():
try:
month, *fields = i.split()
except ValueError:
continue
if DATE_PATTERN.fullmatch(month):
usage_list.append((month, *fields))
df = | pd.DataFrame(usage_list, columns=["Month", "Account", "User", "SBUs", "Restituted"]) | pandas.DataFrame |
'''
Created on Jan 4, 2017
@author: bardya
'''
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import csv
import random
import re
#Starting from a Matrix with 1/0 indicating presence/absence of a taxon in a orthologous group
#With OMA the thresholds for pairwise orthology detection are:
# Alignment Length minimum 61% of the shorter Sequence
# Alignment Score minimum 183
# Stable Pair Tolerance Value 1.53
# Minimum Sequence Length: 40aa
def readCsvToMap(filepath, fieldnames, delimiter='\t'):
fh = open(filepath, 'r')
fh_filtered = [row for row in fh if not row.startswith('#')]
assert len(fieldnames) >= 2
reader = csv.DictReader(fh_filtered, fieldnames=fieldnames, delimiter=delimiter)
xtoymap = {}
if len(fieldnames) == 2:
for linedict in reader:
for k in list(linedict.keys()):
if k == None or k == '':
del linedict[k]
continue
linedict[k] = linedict[k].strip()
xtoymap[linedict[fieldnames[0]]] = '\t'.join(linedict[f] for f in fieldnames[1:])
if len(fieldnames) == 3:
for linedict in reader:
if not linedict[fieldnames[0]] in xtoymap.keys():
xtoymap[linedict[fieldnames[0]]] = {linedict[fieldnames[1]]: linedict[fieldnames[2]]} #.split(' ', 1)[0]
else:
#add this to existing subdict
xtoymap[linedict[fieldnames[0]]][linedict[fieldnames[1]]] = linedict[fieldnames[2]]
return xtoymap
def rarefaction_simultaneous(presabs_df, taxonset):
taxonset = list(taxonset) #make sure it is a list
random.shuffle(taxonset) #works in-place
presabs_df = presabs_df[taxonset] #reordered and filtered
presabs_df = presabs_df.dropna(how='all') #delete all rows with no content
size_vector_pan = ['pan']
size_vector_cor = ['cor']
size_vector_sin = ['sin']
joined_df_pan = pd.DataFrame()
joined_df_cor = pd.DataFrame()
joined_df_sin = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import api.recrudescence_utils as recrudescence_utils
def define_alleles(genotypedata: pd.DataFrame, locirepeats: np.ndarray, maxk: np.ndarray):
'''
Generate definitions of alleles (i.e. binning)
Raw allele lengths are converted and separated into small allele classes.
And when it does, sometimes the data are reported as intermediate values.
This method resolves (bins) the intermediate values.
:param genotypedata:
type: pandas dataframe
description: genetic data, where first column (name 'Sample ID') has the id of the sample, and rest of columns have the format nameoflocus_X, where X is the xth allele detected
:param locirepeats:
type: numpy.array
description: a vector of length number of loci with type of locus (dinucleotide, trinucleotide, etc. repeats)
:param maxk:
type: numpy.array
description: a vector of length of loci with the maximum number of alleles for each locus
:return:
type: list that contains dataframe
description: list of length number of loci, each entry is a number of alleles by 2 matrix (1st column = lower bound, 2nd column = upper bound)
'''
locinames = recrudescence_utils.get_locinames(genotypedata)
nloci = len(locinames)
# first section
alleles = _getAlleles(genotypedata, nloci, locirepeats, locinames)
# second section
compressed_alleles = _compress(alleles, nloci, maxk)
return compressed_alleles
def _getAlleles(genotypedata: pd.DataFrame, nloci: int, locirepeats: np.ndarray, locinames: dict) -> list:
'''
Returns a list that contains all lower bound, upper bound,
and the counts of raw alleles between lower and upper bound values
after binning the raw allele data
:param genotypedata: The data table to retrieve allele values
:param nloci: The number of loci
:param locirepeats: a vector of length number of loci with type of locus
:param locinames: The dictionary that contains all locus prefix and its columns from data table
'''
alleles = []
current_column = 0
for i in range(nloci):
# retrieve raw alleles (each index contains every raw alleles data with the same locinames)
# ex. all data with X313 prefix lociname in index 0
loci_name_prefix, last_column = locinames.get(i)
raw_alleles, current_column = recrudescence_utils.get_RawAlleles(genotypedata, current_column, last_column)
if (max(raw_alleles) - min(raw_alleles)) < locirepeats[i]:
# find break values(lower and upper)
lower_break_value, upper_break_value, counts_column = _findBreakValues(raw_alleles, locirepeats, i)
alleles = _prepareDataframes(alleles, lower_break_value, upper_break_value, counts)
else:
lower_break_value, upper_break_value, counts = _binRawAlleleValues(raw_alleles, locirepeats, i)
alleles = _prepareDataframes(alleles, lower_break_value, upper_break_value, counts)
return alleles
def _findBreakValues(raw_alleles: list, locirepeats: np.ndarray, i: int):
'''
Find the correct lower bound value and upper bound value given raw allele values
'''
lower_break_value = []
upper_break_value = []
counts_column = []
lower_list = []
upper_list = []
for allele in raw_alleles:
lower_list.append(allele - locirepeats[i]/2)
upper_list.append(allele + locirepeats[i]/2)
# search for the min from the lower_list and upper_list and add to break lists.
lower_break_value.append(min(lower_list))
upper_break_value.append(max(upper_list))
counts_column.append(len(lower_list))
return lower_break_value, upper_break_value, counts_column
def _binRawAlleleValues(raw_alleles: list, locirepeats: np.ndarray, i: int):
'''
Find the correct lower bound value and upper bound value given raw allele values
This method happens when there is intermediate raw allele values to bin
'''
# making breaks (not sure if we need this)
min_num = math.floor(min(raw_alleles)) - 0.5
max_num = max(raw_alleles) + 1
breaks = np.array([])
while min_num < max_num:
breaks = np.append(breaks, min_num)
min_num += 1
breaks_min = math.floor(min(breaks))
breaks_max = math.floor(max(breaks))
# allele values
allele_values = np.array(np.round((np.array(breaks[1:]) + np.array(breaks[0:-1])) / 2))
# historgram contains the frequency of occurrences for each breaks
histogram = {(k+0.5): 0 for k in range(breaks_min, breaks_max)}
for allele in raw_alleles:
bound = math.floor(allele) + 0.5
if allele > bound:
histogram[bound] += 1
else:
histogram[bound-1] += 1
# hist_alleles_count
# list that contains the count for each break
hist_alleles_count = list(histogram.values())
# list that contains sum of 'count' from the hist_alleles_count
# increment 'x' index of the hist_alleles_count by locirepeats[j] to select 'count'
counts_by_offset = []
for j in range(locirepeats[i]):
seq = list(range(j, len(hist_alleles_count), locirepeats[i]))
selected_count = []
for num in seq:
selected_count.append(hist_alleles_count[num])
sum = 0
for num in selected_count:
sum += num
counts_by_offset.append(sum)
# select certain allele values based on the raw alleles, counts_by_offset
seq = list(range(counts_by_offset.index(max(counts_by_offset)), len(allele_values), locirepeats[i]))
possible_alleles = []
for num in seq:
possible_alleles.append(allele_values[num])
if min(raw_alleles) <= (min(possible_alleles) - locirepeats[i]/2):
possible_alleles = [min(possible_alleles) - locirepeats[i]] + possible_alleles
if max(raw_alleles) > (max(possible_alleles) + locirepeats[i]/2):
possible_alleles = possible_alleles + [max(possible_alleles) + locirepeats[i]]
# assign clusters
clusters = []
for allele in raw_alleles:
arr = np.array(possible_alleles) - allele
arr = abs(arr).tolist()
min_index = arr.index(min(arr))
clusters.append(min_index)
unique_clusters = list(dict.fromkeys(clusters))
k = len(unique_clusters)
# find break values(lower and upper)
lower_break_value = []
upper_break_value = []
for cluster in unique_clusters:
lower_break_value.append(possible_alleles[cluster] - locirepeats[i]/2)
upper_break_value.append(possible_alleles[cluster] + locirepeats[i]/2)
lower_break_value = sorted(lower_break_value)
upper_break_value = sorted(upper_break_value)
counts = []
for j in range(len(lower_break_value)):
sum = 0
for allele in raw_alleles:
if allele > lower_break_value[j] and allele <= upper_break_value[j]:
sum += 1
counts.append(sum)
return lower_break_value, upper_break_value, counts
def _prepareDataframes(alleles: list, lower_break_value: list, upper_break_value: list, counts: list):
'''
Turn all lists of alleles information to pandas dataframe
'''
# prepare columns of lower_bound, upper_bound, and count
allele_low = pd.DataFrame(lower_break_value)
allele_high = | pd.DataFrame(upper_break_value) | pandas.DataFrame |
import pandas as pd
from tqdm import tqdm
def _merge_datasets():
rating_count = 0.0
dfs = []
for file_name in [
"jester-data-1.xls",
"jester-data-2.xls",
"jester-data-3.xls",
"FINAL jester 2006-15.xls",
"[final] April 2015 to Nov 30 2019 - Transformed Jester Data - .xlsx",
]:
df = pd.read_excel(f"jester/{file_name}", header=None).reset_index(drop=True)
for joke_index in range(101, 159):
if joke_index not in list(df.columns):
df[joke_index] = 99
y = df.pop(0)
_rating_count = y.sum()
rating_count += _rating_count
print(df.shape, _rating_count, rating_count)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
df.to_pickle("jester/full_df.pickle")
def _convert_to_sparse_form():
# user: 136025
# joke: 158
# instance: 6085247
# density: 6085247 / (136025 * 158) = 0.283140757
df = | pd.read_pickle("jester/full_df.pickle") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, <NAME> <akoenzen | uvic.ca>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pandas as pd
import requests as rq
from colorama import Back, Style
class NBClassification(object):
def __init__(self, label: str, value: float = 0.0):
self.label: str = label
self.value: float = value
def __repr__(self):
return "{0}<{1}>".format(self.label, self.value)
class NBTerm(object):
def __init__(self, term: str, likelihood: float = 0.0):
self.term: str = term.lower().strip()
self.likelihood: float = likelihood
def __repr__(self):
return "{0}<{1}>".format(self.term, self.likelihood)
class NBDocument(object):
USE_FILTERED: bool = False
def __init__(self, raw_terms: [NBTerm], filtered_terms: [NBTerm]):
self.raw_terms: [NBTerm] = raw_terms # stopwords included
self.filtered_terms: [NBTerm] = filtered_terms # stopwords removed
def __repr__(self):
str = "\t\t\tTerms: {}\n".format(len(self.get_terms()))
for t in self.get_terms():
str += "\t\t\t{}\n".format(t)
return str
def get_terms(self):
if NBDocument.USE_FILTERED:
return self.filtered_terms
else:
return self.raw_terms
class NBClass(object):
def __init__(self, label: str):
self.label: str = label
self.documents: [NBDocument] = []
self.prior: float = 0.0
self.likelihoods: [NBTerm] = []
self.name: str = ""
if self.label == '0':
self.name = 'Wise Saying'
elif self.label == '1':
self.name = 'Future'
def __repr__(self):
str = "\tClass Label: {}\n".format(self.label)
str += "\tDocuments: {}\n".format(len(self.documents))
for d in self.documents:
str += "\t\t{}\n".format(d)
str += "\tPrior: {}\n".format(self.prior)
str += "\tLikelihoods: {}\n".format(len(self.likelihoods))
for l in self.likelihoods:
str += "\t\t{}\n".format(l)
return str
def add_create_document(self, message: str) -> None:
# break the document into terms
terms = message.split(' ')
raw_terms = [NBTerm(term=t) for t in terms]
filtered_terms = raw_terms # legacy, no use
self.documents.append(NBDocument(raw_terms=raw_terms, filtered_terms=filtered_terms))
def compute_likelihood(self, lexicon: [str]) -> None:
# this will include ALL terms in the class, INCLUDED repeated terms!!!
class_terms = [t.term for d in self.documents for t in d.get_terms()] # ALL TERMS!!!
# now for each term in lexicon compute its likelihood and add to the list of likelihoods
# likelihood = occurrences of term / all terms
for t in lexicon:
# compute numerator. add 1 to avoid the zero-frequency problem
numerator = class_terms.count(t) + 1
# compute denominator. add count of lexicon to avoid zero-frequency problem
denominator = len(class_terms) + len(lexicon)
# add to the likelihood list IF not present
flag = False
for e in self.likelihoods:
if e.term == t:
flag = True
if not flag:
self.likelihoods.append(NBTerm(term=t, likelihood=(numerator / denominator)))
def get_likelihood(self, term: str) -> None:
for e in self.likelihoods:
if e.term == term:
return e.likelihood
def get_class_lexicon(self) -> [str]:
lexicon = []
for d in self.documents:
for t in d.get_terms():
if t.term not in lexicon:
lexicon.append(t.term)
return lexicon
@staticmethod
def get_class_name(label: str):
if label == '0':
return 'Wise Saying'
elif label == '1':
return 'Future'
return 'None'
class NBModel(object):
DEBUG = False
def __init__(self):
self.classes: [NBClass] = []
self.lexicon: [str] = [] # vocabulary of UNIQUE words in ALL documents
def __repr__(self):
str = "Classes: {}\n".format(len(self.classes))
for c in self.classes:
str += "{}\n".format(c)
str += "Lexicon: {}\n".format(len(self.lexicon))
str += "{}".format(sorted(self.lexicon))
return str
def get_class(self, label: str) -> NBClass:
for c in self.classes:
if c.label == label:
return c
return None
def calculate_and_update_prior(self, label: str) -> None:
N_c = float(len(self.get_class(label=label).documents)) # number of docs in class
N = 0.0 # number of docs in all classes
for c in self.classes:
N += len(c.documents)
# update prior
self.get_class(label=label).prior = N_c / N
# +++ DEBUG
if NBModel.DEBUG:
print("PRIOR for class {0} is {1}.".format(label, N_c / N))
print("N_c: {0}, N: {1}".format(N_c, N))
def compute_lexicon(self) -> None:
# vocabulary should NOT contain duplicates
for c in self.classes:
for d in c.documents:
for t in d.get_terms():
if t.term not in self.lexicon:
self.lexicon.append(t.term)
def compute_likelihood(self) -> None:
for c in self.classes:
c.compute_likelihood(lexicon=self.lexicon)
class NaiveBayesTextClassifier(object):
"""
Text classifier using the Naïve Bayes Classifier. This classifier supports only 2 classes, so it's a
binary classifier.
"""
DEBUG = False
SHOW_MODEL = False
MAKE_SUBSET_FOR_TRAINING = False
TRAINING_SUBSET_SIZE = 2
MAKE_SUBSET_FOR_TESTING = False
TESTING_SUBSET_SIZE = 2
def __init__(self):
self.model: NBModel = NBModel()
pass
def train(self, training_set: [str] = [], debug: bool = False) -> NBModel:
# parse the training data and labels and convert them into pandas Series
training_data = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/traindata.txt'
).text.splitlines()
if training_data is not None:
t_data_series = pd.Series(training_data)
training_labels = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/trainlabels.txt'
).text.splitlines()
if training_labels is not None:
t_labels_series = pd.Series(training_labels)
# combine both series into a DataFrame
t_data_matrix = pd.DataFrame({
'message': t_data_series,
'label': t_labels_series
})
# make a custom subset of the entire training set for debugging purposes
if NaiveBayesTextClassifier.MAKE_SUBSET_FOR_TRAINING:
_0_messages = t_data_matrix.loc[
t_data_matrix.label == '0',
'message'][0:NaiveBayesTextClassifier.TRAINING_SUBSET_SIZE
]
_0_labels = ['0' for _ in _0_messages]
_1_messages = t_data_matrix.loc[
t_data_matrix.label == '1',
'message'][0:NaiveBayesTextClassifier.TRAINING_SUBSET_SIZE
]
_1_labels = ['1' for _ in _1_messages]
# replace the DataFrame
t_data_matrix = pd.DataFrame({
'message': pd.concat([
pd.Series(list(_0_messages)),
pd.Series(list(_1_messages))
]),
'label': pd.concat([
pd.Series(_0_labels),
| pd.Series(_1_labels) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from sklearn.metrics import r2_score
import warnings
from scipy.interpolate import interp1d
import numpy as np
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
from __future__ import division
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from fisher import pvalue
import re
import collections
from nltk.stem.porter import PorterStemmer
import math
from percept.tasks.base import Task
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
from percept.conf.base import settings
import os
from percept.tasks.train import Train
from sklearn.ensemble import RandomForestClassifier
import pickle
import random
import logging
log = logging.getLogger(__name__)
MAX_FEATURES = 500
DISTANCE_MIN=1
CHARACTER_DISTANCE_MIN = .2
RESET_SCENE_EVERY = 5
def make_df(datalist, labels, name_prefix=""):
df = pd.DataFrame(datalist).T
if name_prefix!="":
labels = [name_prefix + "_" + l for l in labels]
labels = [l.replace(" ", "_").lower() for l in labels]
df.columns = labels
df.index = range(df.shape[0])
return df
def return_one():
return 1
class SpellCorrector(object):
"""
Taken and slightly adapted from peter norvig's post at http://norvig.com/spell-correct.html
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
punctuation = [".", "!", "?", ","]
def __init__(self):
self.NWORDS = self.train(self.words(file(os.path.join(settings.PROJECT_PATH,'data/big.txt')).read()))
self.cache = {}
def words(self, text):
return re.findall('[a-z]+', text.lower())
def train(self, features):
model = collections.defaultdict(return_one)
for f in features:
model[f] += 1
return model
def edits1(self, word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
inserts = [a + c + b for a, b in splits for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(self, word):
return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS)
def known(self, words): return set(w for w in words if w in self.NWORDS)
def correct(self, word):
if word in self.cache:
return self.cache[word]
suffix = ""
for p in self.punctuation:
if word.endswith(p):
suffix = p
word = word[:-1]
candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word]
newword = max(candidates, key=self.NWORDS.get) + suffix
self.cache.update({word : newword})
return newword
class Vectorizer(object):
def __init__(self):
self.fit_done = False
def fit(self, input_text, input_scores, max_features=100, min_features=3):
self.spell_corrector = SpellCorrector()
self.stemmer = PorterStemmer()
new_text = self.batch_generate_new_text(input_text)
input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
self.vectorizer1.fit(input_text)
self.vocab = self.get_vocab(input_text, input_scores, max_features)
self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
self.fit_done = True
self.input_text = input_text
def spell_correct_text(self, text):
text = text.lower()
split = text.split(" ")
corrected = [self.spell_corrector.correct(w) for w in split]
return corrected
def batch_apply(self, all_tokens, applied_func):
for key in all_tokens:
cor = applied_func(all_tokens[key])
all_tokens[key] = cor
return all_tokens
def batch_generate_new_text(self, text):
text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
text = [re.sub("\s+", " ", t) for t in text]
t_tokens = [t.split(" ") for t in text]
all_token_list = list(set(chain.from_iterable(t_tokens)))
all_token_dict = {}
for t in all_token_list:
all_token_dict.update({t : t})
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
for i in xrange(0,len(t_tokens)):
for j in xrange(0,len(t_tokens[i])):
t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
new_text = [" ".join(t) for t in t_tokens]
return new_text
def generate_new_text(self, text):
no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
no_punctuation = re.sub("\s+", " ", no_punctuation)
corrected = self.spell_correct_text(no_punctuation)
corrected = [self.stemmer.stem(w) for w in corrected]
new = " ".join(corrected)
return new
def get_vocab(self, input_text, input_scores, max_features):
train_mat = self.vectorizer1.transform(input_text)
input_score_med = np.median(input_scores)
new_scores = [0 if i<=input_score_med else 1 for i in input_scores]
ind_max_features = math.floor(max_features/max(input_scores))
all_vocab = []
all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
for s in xrange(0,max(input_scores)):
sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
pvalues = []
for i in xrange(0,len(all_cols)):
lcol = all_cols[i]
good_lcol = lcol[sel_inds]
bad_lcol = lcol[out_inds]
good_lcol_present = len(good_lcol[good_lcol > 0])
good_lcol_missing = len(good_lcol[good_lcol == 0])
bad_lcol_present = len(bad_lcol[bad_lcol > 0])
bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
pvalues.append(pval.two_tail)
col_inds = list(xrange(0,train_mat.shape[1]))
p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
p_frame = p_frame.sort(['pvalues'], ascending=True)
getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2])
all_vocab.append(vocab)
return list(set(list(chain.from_iterable(all_vocab))))
def batch_get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
new_text = self.batch_generate_new_text(text)
text = [text[i] + new_text[i] for i in xrange(0,len(text))]
return (self.vectorizer.transform(text).todense())
def get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
itext=text
if isinstance(text, list):
itext = text[0]
new_text = self.generate_new_text(itext)
if isinstance(text, list):
text = [text[0] + new_text]
else:
text = [text + new_text]
return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
data = Complex()
row_data = List()
speaker_code_dict = Dict()
speaker_codes = List()
vectorizer = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
help_text = "Cleanup simpsons scripts."
args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
scriptfile = kwargs.get('scriptfile')
script_data = pickle.load(open(scriptfile))
script = script_data.tasks[2].voice_lines.value
speakers = []
lines = []
for s in script:
for (i,l) in enumerate(s):
if i>0:
previous_line = s[i-1]['line']
previous_speaker = s[i-1]['speaker']
else:
previous_line = ""
previous_speaker = ""
if i>1:
two_back_speaker = s[i-2]['speaker']
else:
two_back_speaker = ""
if len(s)>i+1:
next_line = s[i+1]['line']
else:
next_line = ""
current_line = s[i]['line']
current_speaker = s[i]['speaker']
lines.append(current_line)
speakers.append(current_speaker)
row_data = {
'previous_line' : previous_line,
'previous_speaker' : previous_speaker,
'next_line' : next_line,
'current_line' : current_line,
'current_speaker' : current_speaker,
'two_back_speaker' : two_back_speaker
}
self.row_data.append(row_data)
self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
self.max_features = math.floor(MAX_FEATURES)/3
self.vectorizer = Vectorizer()
self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
self.speaker_code_dict.update({'' : -1})
meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
#meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features), | pd.DataFrame(next_features) | pandas.DataFrame |
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import model_selection as ms
from sklearn import preprocessing as sklpp
df = pd.DataFrame(pd.read_csv("/Users/brndy.747/Documents/Maths_I.A_Final/archive/data.csv"))
df = df.drop(['tempo','beats','filename','mfcc1','mfcc2','mfcc3','mfcc4','mfcc5','mfcc6','mfcc7','mfcc8','mfcc9','mfcc10','mfcc11','mfcc12','mfcc13','mfcc14','mfcc15','mfcc16','mfcc17','mfcc18','mfcc19','mfcc20'],axis=1)
# df = df.drop(['filename','tempo', 'beats', 'chroma_stft', 'rmse', 'spectral_centroid' , 'spectral_bandwidth', 'rolloff', 'zero_crossing_rate'],axis=1)
df = pd.DataFrame(df)
df.drop(df.index[800:1000],0,inplace=True) ##Removes Rock+Reggae Music
df.drop(df.index[0:100],0,inplace=True) ##Removes blues Music
df.drop(df.index[200:300],0,inplace=True) ##Removes disco Music
df.drop(df.index[200:300],0,inplace=True) ##Removes hip-hop Music
df.drop(df.index[100:200],0,inplace=True) ##Removes country Music
df.drop(df.index[100:200],0,inplace=True) ##Removes jazz Music
def normalize(df):
result = df.copy()
for feature_name in df.columns:
max_value = df[feature_name].max()
min_value = df[feature_name].min()
try:
result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
except:
result[feature_name] = df[feature_name]
return result
better_norm_df = normalize(df)
better_norm_df['label'] = better_norm_df['label'].replace(['classical','metal','pop'],[0,1,2])
print(better_norm_df)
# pp = sns.pairplot(data=better_norm_df,
# x_vars=['chroma_stft', 'rmse', 'spectral_centroid' , 'spectral_bandwidth', 'rolloff', 'zero_crossing_rate'],
# y_vars=['chroma_stft', 'rmse', 'spectral_centroid' , 'spectral_bandwidth', 'rolloff', 'zero_crossing_rate'],
# hue='label')
# plt.show()
train, test = ms.train_test_split(better_norm_df, train_size=0.8 ,test_size=0.2, random_state=32, shuffle=True)
print("Train Dataset")
print(len(train))
print(train)
train_df = | pd.DataFrame(train) | pandas.DataFrame |
import pandas
u202 = pandas.read_csv("u202.csv", encoding="utf-8")
u203 = pandas.read_csv("u203.csv", encoding="utf-8")
u302 = pandas.read_csv("u302.csv", encoding="utf-8")
u202.dropna(inplace=True)
u203.dropna(inplace=True)
u302.dropna(inplace=True)
u202["místnost"] = "u202"
u203["místnost"] = "u203"
u302["místnost"] = "u302"
maturita = pandas.concat([u202, u203, u302], ignore_index=True)
# ## 3. Joinování dat
# Obdobou SQL příkazu `JOIN` je v Pandas funkce `merge`. K datasetu výsledků u maturitních zkoušek budeme joinovat data o předsedajících maturitních komisí v jednotlivých dnech.
preds = pandas.read_csv("predsedajici.csv", encoding="utf-8")
print(preds)
# Vyzkoušíme merge, zatím jen na místnosti u202.
print(u202)
test = pandas.merge(u202, preds)
print(test)
# Dostali jsme prázdný dataframe. To je proto, že defaultně `merge` dělá `INNER JOIN` přes všechny sloupce se stejnými jmény, zde `jméno` a `den`. Protože jedno `jméno` odpovídá studentovi a druhé předsedovi, nemáme žádný průnik.
#
# Nabízel by se `OUTER JOIN`, ale ten nepomůže.
test = pandas.merge(u202, preds, how="outer")
print(test)
# Tím se nám akorát promíchala jména studentů a předsedajících, navíc vznikla spousta nedefinovaných hodnot.
# Ve skutečnosti potřebujeme provést `JOIN` jen podle sloupce `den` -- ke každému dni známe předsedu komise a všechny studenty, kteří měli ten den zkoušku.
test = | pandas.merge(u202, preds, on="den") | pandas.merge |
# -*- coding: utf-8 -*-
# website: http://30daydo.com
# @Time : 2019/3/19 23:21
# @File : auto_trader.py
import datetime
import logging
import time
import pymongo
import easyquotation
import easytrader
import pandas as pd
from config import PROGRAM_PATH, MONGO_PORT, MONGO_HOST
from configure.settings import DBSelector
SELL = 7 # 配置为8%个点卖
DB = DBSelector()
class AutoTrader():
def __init__(self):
self.today = datetime.date.today().strftime('%Y-%m-%d')
# self.stock_candidates = self.get_candidates()
# self.stock_candidates = self.get_candidates()
self.logger = self.llogger('log/auto_trader_{}'.format(self.today))
self.logger.info('程序启动')
self.user = easytrader.use('gj_client')
# self.user = easytrader.use('ths')
self.user.prepare('user.json')
# self.user.connect(PROGRAM_PATH)
# self.blacklist_bond = self.get_blacklist()
# self.q=easyquotation.use('qq')
self.yesterday = datetime.datetime.now() + datetime.timedelta(days=-1)
# 如果是周一 加一个判断
self.yesterday = self.yesterday.strftime('%Y-%m-%d')
def get_close_price(self):
conn = DB.get_mysql_conn('db_jisilu', 'qq')
cursor = conn.cursor()
cmd = 'select 可转债代码,可转债价格 from `tb_jsl_{}`'.format(self.yesterday)
try:
cursor.execute(cmd)
result = cursor.fetchall()
except Exception as e:
return None
else:
d = {}
for item in result:
d[item[0]] = item[1]
return d
# 设置涨停 附近卖出 挂单
def set_ceiling(self):
position = self.get_position()
# print(position)
code_price = self.get_close_price()
for each_stock in position:
try:
code = each_stock.get('证券代码')
amount = int(each_stock.get('可用余额', 0))
if amount <= 0.1:
continue
close_price = code_price.get(code, None)
buy_price = round(close_price * (1 + SELL * 0.01), 1)
self.user.sell(code, price=buy_price, amount=amount)
except Exception as e:
self.logger.error(e)
# 获取候选股票池数据
def get_candidates(self):
stock_candidate_df = pd.read_sql(
'tb_stock_candidates', con=self.engine)
stock_candidate_df = stock_candidate_df.sort_values(by='可转债价格')
return stock_candidate_df
def get_market_data(self):
market_data_df = pd.read_sql('tb_bond_jisilu', con=self.engine)
return market_data_df
# 永远不买的
def get_blacklist(self):
black_list_df = pd.read_sql('tb_bond_blacklist', con=self.engine)
return black_list_df['code'].values
# 开盘前统一下单
def morning_start(self, p):
# print(self.user.balance)
codes = self.stock_candidates['可转债代码']
prices = self.stock_candidates['可转债价格']
code_price_dict = dict(zip(codes, prices))
count = 0
while 1:
count += 1
logging.info('Looping {}'.format(count))
for code, price in code_price_dict.copy().items():
# 价格设定为昨天收盘价的-2%
if code not in self.blacklist_bond:
# buy_price=round(price*0.98,2)
deal_detail = self.q.stocks(code)
close = deal_detail.get(code, {}).get('close') # 昨日收盘
ask = deal_detail.get(code, {}).get('ask1') # 卖一
bid = deal_detail.get(code, {}).get('bid1') # 买一价
current_percent = (ask - close) / close * 100
# print(current_percent)
if current_percent <= p:
self.logger.info('>>>>代码{}, 当前价格{}, 开盘跌幅{}'.format(code, bid, current_percent))
try:
print('code {} buy price {}'.format(code, ask))
self.user.buy(code, price=ask + 0.1, amount=10)
except Exception as e:
self.logger.error('>>>>买入{}出错'.format(code))
self.logger.error(e)
else:
del code_price_dict[code]
# 空的时候退出
if not code_price_dict:
break
time.sleep(20)
# 持仓仓位
def get_position(self):
'''
[{'证券代码': '128012', '证券名称': '辉丰转债', '股票余额': 10.0, '可用余额': 10.0,
'市价': 97.03299999999999, '冻结数量': 0, '参考盈亏': 118.77, '参考成本价': 85.156,
'参考盈亏比例(%)': 13.947000000000001, '市值': 970.33, '买入成本': 85.156, '市场代码': 1,
'交易市场': '深圳A股', '股东帐户': '0166448046', '实际数量': 10, 'Unnamed: 15': ''}
:return:
'''
return self.user.position
# 持仓仓位 Dataframe格式
def get_position_df(self):
position_list = self.get_position()
# print(position_list)
df = | pd.DataFrame(position_list) | pandas.DataFrame |
from datetime import datetime, timedelta
import pandas as pd
from ModuleFiles.request import TrendReq
from tqdm import tqdm
import plotly.express as px
import plotly.graph_objects as go
def find_US_Trend(keywords_to_search):
country = "US"
pytrend = TrendReq()
Keyword = keywords_to_search
kw_list = [keywords_to_search]
pytrend.build_payload(kw_list, cat=0, geo=country,timeframe = "now 7-d" , gprop='')
City = pytrend.interest_by_region(inc_low_vol=True, inc_geo_code=True)
City_Names = City.index
City["City_Names"] = City_Names
All_City = list(City["geoCode"])
country = All_City[0]
pytrend = TrendReq()
Keyword = keywords_to_search
kw_list = [keywords_to_search]
pytrend.build_payload(kw_list, cat=0, geo=country,timeframe = "now 7-d" , gprop='')
City = pytrend.interest_by_region(resolution='CITY',inc_low_vol=True, inc_geo_code=False)
City_Names = City.index
City["City_Names"] = City_Names
Country_Trend_Frame = City
for city in All_City[1:]:
country = city
pytrend = TrendReq()
Keyword = keywords_to_search
kw_list = [keywords_to_search]
pytrend.build_payload(kw_list, cat=0, geo=country,timeframe = "now 7-d" , gprop='')
City = pytrend.interest_by_region(resolution='CITY',inc_low_vol=True, inc_geo_code=False)
City_Names = City.index
City["City_Names"] = City_Names
Country_Trend_Frame = pd.concat([Country_Trend_Frame, City])
Country_Trend_Frame.drop_duplicates(subset = Country_Trend_Frame.columns ,keep ="first", inplace = True)
US_Codes = | pd.read_csv("Module-Data/US-Codes.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import warnings
import datetime
import json
from sklearn.feature_extraction.text import TfidfVectorizer
class TextTransformer(object):
def __init__(self):
self._new_columns = []
self._old_column = None
self._max_features = 100
self._vectorizer = None
def fit(self, X, column):
self._old_column = column
self._vectorizer = TfidfVectorizer(
analyzer="word",
stop_words="english",
lowercase=True,
max_features=self._max_features,
)
x = X[column][~ | pd.isnull(X[column]) | pandas.isnull |
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
s1=Series(['A','B','C','D','E',np.nan])
print(s1)
#validate
print(s1.isnull())
#drop unavailbale values
print(s1.dropna())
df=DataFrame([[1,2,3],[4,5,np.nan],[7,np.nan,10],[np.nan,np.nan,np.nan]])
print(df)
#drona in dataframe
#print(df.dropna()) #delets the entire row that has atleast one nan entry
print(df.dropna(how="all"))
#dropna corresponding to columns
print(df.dropna(axis=1)) #column vise deletion
df2= | DataFrame([[1,2,3,np.nan],[4,5,6,7],[8,9,np.nan,np.nan],[12,np.nan,np.nan,np.nan]])
print(df2) | pandas.DataFrame |
from typing import Union
import os
import json
import pandas as pd
from redata.commons.logger import log_stdout
def save_metadata(json_response: Union[list, dict],
out_file_prefix: str,
root_directory: str = '',
metadata_directory: str = '',
save_csv: bool = False,
log=None):
"""
Write metadata contents to JSON and CSV file
:param json_response: Content in list or dict
:param out_file_prefix: Filename prefix. Appends .json and .csv
:param root_directory: Full path containing the working directory
:param metadata_directory: Metadata path
:param save_csv: Save a CSV file. Default: False
:param log: LogClass or logging object. Default: log_stdout()
"""
if log is None:
log = log_stdout()
log.debug("starting ...")
log.info("")
log.info("** SAVING CURATION METADATA **")
if not root_directory:
root_directory = os.getcwd()
metadata_path = os.path.join(root_directory, metadata_directory)
out_file_prefix = f"{metadata_path}/{out_file_prefix}"
# Write JSON file
json_out_file = f"{out_file_prefix}.json"
if not os.path.exists(json_out_file):
log.info(f"Writing: {json_out_file}")
with open(json_out_file, 'w') as f:
json.dump(json_response, f, indent=4)
else:
log.info(f"File exists: {out_file_prefix}")
# Write CSV file
if save_csv:
csv_out_file = f"{out_file_prefix}.csv"
df = | pd.DataFrame.from_dict(json_response, orient='columns') | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
###########################################################################
# we have searched for keywords in the original news
# for stemmed keywords in the stemmed news
# for lemmatized keywords int the lemmatized news
# now, want to merge all the results to see whats happening
###########################################################################
import pandas as pd
import numpy as np
from functions import add_stem
newsid_synonyms_origin=pd.read_csv('output/file1_keywords_original_keywords.csv') #input: output of solr_indexing_data
print(len(newsid_synonyms_origin))
#287199
newsid_synonyms_stem= | pd.read_csv('output/file1_keywords_stemmed_keywords.csv') | pandas.read_csv |
from pyhdx.panel.template import GoldenElvis, ExtendedGoldenTemplate
from pyhdx.panel.theme import ExtendedGoldenDarkTheme, ExtendedGoldenDefaultTheme
from pyhdx.panel.controllers import *
from pyhdx.panel.main_controllers import ComparisonController, PyHDXController
from pyhdx.panel.views import *
from pyhdx.panel.log import get_default_handler
import sys
from pyhdx import VERSION_STRING_SHORT
from pyhdx.panel.base import BokehFigurePanel, STATIC_DIR
from pyhdx.fileIO import csv_to_dataframe
from pyhdx.panel.sources import DataFrameSource
from pyhdx.panel.transforms import RescaleTransform, ApplyCmapTransform, PeptideLayoutTransform, ResetIndexTransform
from pyhdx.panel.opts import CmapOpts
from pyhdx.panel.filters import UniqueValuesFilter, MultiIndexSelectFilter
from pyhdx.panel.log import StreamToLogger
import logging
import panel as pn
from pyhdx.panel.log import logger
from pyhdx.panel.config import ConfigurationSettings
from pyhdx.local_cluster import default_client
from panel import pane
from lumen.views import PerspectiveView, hvPlotView
from lumen.filters import WidgetFilter, ParamFilter
from pathlib import Path
import pandas as pd
import matplotlib as mpl
import datetime
DEBUG = True
current_dir = Path(__file__).parent
data_dir = current_dir.parent.parent / 'tests' / 'test_data'
global_opts = {'show_grid': True}
cfg = ConfigurationSettings()
@logger('pyhdx')
def main_app(client='default'):
client = default_client() if client == 'default' else client
logger = main_app.logger
# ---------------------------------------------------------------------- #
# SOURCES
# ---------------------------------------------------------------------- #
col_index = pd.MultiIndex.from_tuples([], names=('state', 'quantity'))
df_peptides = pd.DataFrame(columns=col_index)
col_index = pd.MultiIndex.from_tuples([], names=('state', 'exposure'))
row_index = pd.RangeIndex(0, 1, name='r_number')
df_rfu = pd.DataFrame(columns=col_index, index=row_index)
col_index = pd.MultiIndex.from_tuples([], names=('fit_ID', 'state', 'quantity'))
row_index = pd.RangeIndex(0, 1, name='r_number')
df_rates = pd.DataFrame(columns=col_index, index=row_index)
# todo make sure that proper-shaped df is used to initiate stream (and generalize for rectangles plot)
col_index = pd.MultiIndex.from_tuples([], names=('fit_ID', 'state', 'quantity'))
row_index = pd.RangeIndex(0, 1, name='r_number')
df_global_fit = | pd.DataFrame(columns=col_index, index=row_index) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cfsr_grib2_extract_17var.py:
This script use wgrib2 program to extract 17 variables from NCEP-CFSR dataset.
"""
# meta-data
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, DataQualia Lab Co. Ltd."
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# end of meta-data
import os, re, subprocess, logging, argparse
import pandas as pd
# Define parameters
VARS = ['h500', 'mslp', 'u200', 'v200', 't200', 'rh700', 't700', 'u700', 'v700',\
'rh850', 't850', 'u850', 'v850', 'rh925', 't925', 'u925', 'v925']
VAR_PATTERN = {'h500': "HGT:500", 'mslp':"PRMSL:mean",\
'u200':"UGRD:200", 'v200':"VGRD:200", 't200':"TMP:200", \
'rh700':"RH:700", 't700':"TMP:700", 'u700':"UGRD:700", 'v700':"VGRD:700",\
'rh850':"RH:850", 't850':"TMP:850", 'u850':"UGRD:850", 'v850':"VGRD:850",\
'rh925':"RH:925", 't925':"TMP:925", 'u925':"UGRD:925", 'v925':"VGRD:925"}
print(VAR_PATTERN)
# Search and parse all NCEP-CFSR data files in grb2 format
def searchCFSR(srcdir):
df = []
for fn in os.listdir(srcdir):
if os.path.isfile(os.path.join(srcdir, fn)) and fn.endswith('.grb2') and ('.pg' in fn):
timestamp = fn.split('.')[0]
df.append({'time':timestamp, 'uri':os.path.join(srcdir, fn)})
df = | pd.DataFrame(df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Simulate elections.
Elements of an election
1. Create voter preferences
- Create voter preference distributions
- Create voter preference tolerance distribution
2. Create candidate preferences
3. Simulate voter behavior, strategy
4. Transform voter preferences into candidate scores or rankings
5. Input scores/ranks into election system.
6. Run the election.
7. Measure the results.
Object Data Transfer Model
--------------------------
Voters --> VoterGroup
Voters/VoterGroup --> Candidates
Voters, VoterGroup, Candidates --> Election
To construct models or benchmarks, start by creating object `Voters`.
`Voters` may have various properties such as preference,
voter strategy parameters, tolerance circles, etc. Define these
properties in Voters. Voters can be segregated by groups,
and each group may have different properties. `VoterGroup` is used to
define groups of several `Voters`.
After defining voters, candidates may be defined using class
`Candidate`. `Candidate` definition may be dependent on the voters population,
therefore `Candidate` accepts voters as an argument.
With the voters and candidates define, an election can be generated with
`Election`. `Election` has many subclasses which run the election.
- `BallotGenerator` takes voter and candidate information to generate honest
and tactical ballots.
- `eRunner` handles the running of specific types of elections.
- `ElectionResult` handles the storage of output data.
"""
import collections
import pickle
import copy
from typing import List, NamedTuple, Tuple, Dict
import numpy as np
import pandas as pd
import scipy
from scipy.stats import truncnorm
from votesim import metrics
from votesim import ballot
from votesim import votemethods
from votesim import utilities
from votesim.models import vcalcs
from votesim.models.dataclasses import (VoterData,
VoterGroupData,
CandidateData,
ElectionData,
ElectionResult,
strategy_data,
StrategyData,
)
from votesim.strategy import TacticalBallots
# from votesim.strategy import TacticalBallots, FrontRunners
__all__ = [
'Voters',
'VoterGroup',
'Candidates',
'Election'
]
# Base random seeds
VOTERS_BASE_SEED = 2
CLIMIT_BASE_SEED = 3
CANDIDATES_BASE_SEED = 4
ELECTION_BASE_SEED = 5
#import seaborn as sns
import logging
logger = logging.getLogger(__name__)
def ltruncnorm(loc, scale, size, random_state=None):
"""
Truncated normal random numbers, cut off at locations less than 0.
Parameters
-----------
loc : float
Center coordinate of gaussian distribution
scale : float
Std deviation scale
size : int
Number of random numbers to generate
random_state : None or numpy.random.RandomState
Random number seeding object, or None.
Returns
---------
out : array shaped (size)
Output samples
"""
if scale == 0:
return np.ones(size) * loc
xmin = -loc / scale
t = truncnorm(xmin, 1e6)
s = t.rvs(size=size, random_state=random_state)
s = s * scale + loc
return s
def _RandomState(seed, level=1):
"""
Create random state.
Generate multiple random statse from a single seed, by specifying
different levels for different parts of Election.
Parameters
----------
seed : int
Integer seed
level : int
Anoter integer seed.
"""
if seed is None:
return np.random.RandomState()
else:
return np.random.RandomState((seed, level))
class Voters(object):
"""Create simple normal distribution of voters.
Parameters
----------
seed : int or None
Integer seed for pseudo-random generation. None for random numbers.
tol : float or None
Voter preference max tolerance.
base : str
Voter rating mapping to distance, either:
- 'linear' - Linear mapping of distance to rating
- 'quadratic' - Quadratic mapping of distance to rating
- 'sqrt' - Square root mappiong of distance to rating
order : int
Order or norm calculation for voter-candidate regret distance.
Attributes
----------
data : `votesim.models.dataclasses.VoterData`
Voter data
"""
data: VoterData
def __init__(self, seed: int=None, tol: float=None,
base: str='linear', order: int=1):
self.init(seed, order=order)
self.set_behavior(tol=tol, base=base)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed: int, order: int):
"""Set pseudorandom seed & distance calculation order."""
self.seed = seed
self._randomstate = _RandomState(seed, VOTERS_BASE_SEED)
self._order = order
self._weights = None
return self
@utilities.recorder.record_actions(replace=True)
def set_behavior(self, tol: float=None, base: str='linear',):
"""Set voter strategy type."""
self._tol = tol
self._base = base
return self
@utilities.recorder.record_actions()
def add_random(self, numvoters, ndim=1, loc=None):
"""Add random normal distribution of voters.
Parameters
----------
numvoters : int
Number of voters to generate
ndim : int
Number of preference dimensions of population
loc : array shaped (ndim,)
Coordinate of voter centroid
"""
rs = self._randomstate
center = np.zeros(ndim)
voters = rs.normal(center, size=(numvoters, ndim))
if loc is not None:
voters = voters + loc
return self._add_voters(voters)
@utilities.recorder.record_actions()
def add_points(self, avgnum, pnum, ndim=1):
"""Add a random point with several clone voters at that point.
Parameters
----------
avgnum : int
Avg. Number of voters per unique point
pnum : int
Number of unique points
ndim : int
Number of dimensions
"""
rs = self._randomstate
center = np.zeros(ndim)
for i in range(pnum):
# coordinate of point
point = rs.normal(center, size=(1, ndim))
# number of voters at the point
voternum = ltruncnorm(1, 1, 1) * avgnum
voternum = int(voternum)
voters = np.ones((voternum, ndim)) * point
self._add_voters(voters)
return self
@utilities.recorder.record_actions()
def add(self, pref):
"""Add arbitrary voters.
Parameters
----------
pref : array shape (a, b)
Voter preferences, `a` is number of voters, `b` pref. dimensions.
"""
return self._add_voters(pref)
def _add_voters(self, pref):
"""Base function for adding 2d array of candidates to election."""
try:
pref = np.row_stack((self._pref, pref))
except (AttributeError, ValueError):
pref = np.atleast_2d(pref)
self._pref = pref
return self
def build(self):
"""Finalize Voter, construct immutable VoterData."""
self.data = VoterData(pref=self._pref,
weights=self._weights,
order=self._order,
stats=None,
tol=self._tol,
base=self._base,
)
return self
def calculate_distances(self, candidates: CandidateData):
"""Preference distances of candidates from voters for building ballots.
Parameters
----------
candidates : votesim.models.dataclasses.CandidateData
Candidate preference data
"""
pref = self.data.pref
order = self.data.order
weights = self.data.weights
distances = vcalcs.voter_distances(voters=pref,
candidates=candidates.pref,
weights=weights,
order=order)
return distances
def honest_ballots(self, candidates: CandidateData):
"""Honest ballots calculated from Candidates."""
distances = self.calculate_distances(candidates)
b = ballot.gen_honest_ballots(distances=distances,
tol=self.data.strategy['tol'],
base=self.data.strategy['base'])
return b
class VoterGroup(object):
"""Group together multiple voter objects & interact with candidates.
Parameters
----------
voters_list : list[Voters]
List of Voters
Attributes
----------
group : list[Voters]
Same as voters_list
"""
def __init__(self, voters_list: List[Voters]):
try:
iter(voters_list)
except Exception:
voters_list = [voters_list]
self.group = voters_list
self._build()
return
def _build(self):
"""Finalize VoterGroup, build immutable VoterGroupData."""
group_datas = tuple(v.build() for v in self.group)
orders = np.array([v.data.order for v in self.group])
if len(orders) > 0:
order = orders[0]
if not np.all(orders == orders[0]):
raise ValueError('Order of voters in group must all be same.')
else:
order = None
# data = self.group[0]
# data = data.replace(pref=self._get_pref())
# self.data = data
pref = self._get_pref()
stats = metrics.VoterStats(pref=pref,
weights=None,
order=order)
group_index = dict(enumerate(self.group_indices))
data = VoterGroupData(groups=group_datas,
pref=pref,
weights=None,
order=order,
stats=stats,
group_index=group_index,
)
self.data = data
return self
def build(self):
"""This is a dummy build and does nothing. VoterGroup is auto-built."""
return self
def _get_pref(self):
vlist = [v.data.pref for v in self.group]
return np.vstack(vlist)
def __getitem__(self, key):
return self.group[key]
@utilities.lazy_property
def group_indices(self):
"""Row indices to obtain child's voters for all children in the voter
preference and ballot arrays.
Returns
-------
slices : list of slice
Slice which returns the Voter group, indexed by group number.
"""
groups = self.group
lengths = [len(v.data.pref) for v in groups]
iarr = np.cumsum(lengths)
iarr = np.append(0, iarr)
slices = [slice(iarr[i], iarr[i+1]) for i in iarr[:-1]]
return slices
def voter_group(vlist) -> VoterGroup:
"""Group together multiple Voters."""
if hasattr(vlist, 'group'):
return vlist
else:
return VoterGroup(vlist)
class Candidates(object):
"""Create candidates for spatial model.
Parameters
-----------
voters : `Voters` or `VoterGroup`
Voters to draw population data.
seed : int or None
Seed for random number generation.
Attributes
----------
pref : array shape (a, b)
Voter preferences, `a` number of candidates,
`b` number of preference dimensions
"""
data: CandidateData
def __init__(self, voters: Voters, seed: int=None):
self._method_records = utilities.recorder.RecordActionCache()
if not hasattr(voters, '__len__'):
voters = [voters]
self.voters = voter_group(voters)
self.set_seed(seed)
return
@utilities.recorder.record_actions()
def set_seed(self, seed: int):
""" Set pseudorandom seed """
self._seed = (seed, CANDIDATES_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return self
def _add_candidates(self, candidates: np.ndarray):
"""Base function for adding 2d array of candidates to election"""
candidates = np.array(candidates)
assert candidates.ndim == 2, 'candidates array must have ndim=2'
vdata = self.voters.data
try:
candidates = np.row_stack((self._pref, candidates))
except (AttributeError, ValueError):
candidates = np.atleast_2d(candidates)
cdim = candidates.shape[1]
vdim = vdata.pref.shape[1]
condition = cdim == vdim
s = ('dim[1] of candidates (%s) '
'must be same as dim[1] (%s) of self.voters' % (cdim, vdim))
assert condition, s
self._pref = candidates
return self
def reset(self):
"""Reset candidates for a given Voters.
Delete candidate preferences and records"""
try:
self._method_records.reset()
except AttributeError:
pass
try:
del self.data
except AttributeError:
pass
return
@utilities.recorder.record_actions()
def add_random(self, cnum: int, sdev=2):
"""
Add random candidates, uniformly distributed.
Parameters
----------
cnum : int
Number of candidates for election
sdev : float
+- Width of standard deviations to set uniform candidate
generation across voter population
"""
rs = self._randomstate
std = self.voters.data.stats.pref_std
mean = self.voters.data.stats.pref_mean
ndim = std.shape[0]
candidates = rs.uniform(low = -sdev*std,
high = sdev*std,
size = (cnum, ndim)) + mean
return self._add_candidates(candidates)
@utilities.recorder.record_actions()
def add(self, candidates: np.ndarray):
"""Add 2d array of candidates to election, record actions
Parameters
----------
candidates : array shape (a, n)
Candidate preference coordinates.
- a = number of candidates
- n = number of preference dimensions
Returns
-------
out: Candidates
`self`
"""
self._add_candidates(candidates)
return self
def build(self):
"""Construct immutable CandidateData needed for simulation."""
voters = self.voters
pref = self._pref
distances = vcalcs.voter_distances(voters=voters.data.pref,
candidates=pref,
weights=voters.data.weights,
order=voters.data.order,
)
stats = metrics.CandidateStats(pref=pref,
distances=distances)
self.data = CandidateData(pref=self._pref,
distances=distances,
stats=stats)
return self
class StrategiesEmpty(object):
"""Create empty strategy; honest election."""
_strategies = []
data = ()
def __init__(self):
return
def build(self):
return self
def __len__(self):
return 0
class Strategies(object):
"""Strategy constructor for `VoterGroup`."""
def __init__(self, vgroup: VoterGroup):
self._method_records = utilities.recorder.RecordActionCache()
self.voters = voter_group(vgroup)
self.vlen = len(self.voters.group)
self._strategies = []
return
@utilities.recorder.record_actions()
def add(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int=0,
frontrunnertype: str='eliminate',):
"""Set a strategy for a specified voter group.
Parameters
----------
tactics : tuple[str]
Tuple of tactic names.
subset : str
- '' -- No subset
- 'topdog' -- Topdog voter coalition
- 'underdog' -- Underdog voter coalition
ratio : float
Ratio of strategic voters in the group from [0 to 1].
underdog : int or None
Specify the underdog candidate. Set to None to estimate best underdog.
groupnum : int, optional
Voter group number to set strategy. The default is 0.
frontrunnertype : str, optional
Strategy used to determine underdog frontrunner.
The default is 'eliminate'.
Returns
-------
Strategies
Returns `self`.
"""
return self._set(
tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=groupnum,
frontrunnertype=frontrunnertype,
)
@utilities.recorder.record_actions()
def fill(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int,
frontrunnertype='eliminate'):
"""Set strategy for unset groups."""
locations = self.get_no_strategy
for ii in locations:
self._set(
tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=ii,
frontrunnertype=frontrunnertype,
)
return self
def _set(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int,
frontrunnertype='eliminate',
):
group_index = self.voters.group_indices[groupnum]
strat_data = StrategyData(tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=groupnum,
index=group_index,
frontrunnertype=frontrunnertype)
self._strategies.append(strat_data)
return self
def build(self):
"""Construct static data needed to run simulation"""
if len(self.get_no_strategy()) > 0:
raise ValueError('Insufficient strategies have been defined!')
if self.has_duplicates():
raise ValueError('Duplicate strategy entries found.')
self.data = tuple(self._strategies)
return self
def get_no_strategy(self):
"""ndarray : Groups' index locations that have no strategies set."""
no_strat_locs = []
for ii, index in enumerate(self.voters.group_indices):
found = False
for strategy in self._strategies:
if np.all(index == strategy.index):
found = True
if not found:
no_strat_locs.append(ii)
return np.array(no_strat_locs)
def has_duplicates(self):
"""Make sure no duplicate group index + subset locations have been defined.
Return True if duplicates found. False otherwise."""
data = []
for strategy in self._strategies:
index = strategy.index
subset = strategy.subset
data.append((repr(index), subset))
count = collections.Counter(data)
count_values = list(count.values())
iimax = np.argmax(count_values)
if count_values[iimax] > 1:
logger.warn('Duplicate strategy found at strategy #%s', iimax)
return True
return False
def __len__(self):
return len(self._strategies)
class BallotGenerator(object):
"""
Generate ballots from voter and candidate data.
Parameters
----------
voters_list : list of Voter or VoterGroup
Voters of election
candidates : Candidates
Candidates of election
scoremax : int
Maximum score for scored ballots.
"""
tacticalballots : TacticalBallots
honest_ballot_dict : dict
def __init__(self,
voters_list: VoterGroup,
candidates: Candidates,
scoremax: int):
self.candidates = candidates
self.votergroup = voter_group(voters_list)
self.scoremax = scoremax
self._init_honest_builder()
return
def _init_honest_builder(self):
"""Honest ballot constructor for ratings, ranks, scores, and votes."""
cdata = self.candidates.data
blist = []
for voter in self.votergroup.group:
distances = voter.calculate_distances(cdata)
b = ballot.gen_honest_ballots(distances=distances,
tol=voter.data.tol,
base=voter.data.base,
maxscore=self.scoremax,)
blist.append(b)
self.honest_ballot_gen = ballot.CombineBallots(blist)
bdict = {}
bdict['rank'] = self.honest_ballot_gen.ranks
bdict['score'] = self.honest_ballot_gen.scores
bdict['rate'] = self.honest_ballot_gen.ratings
bdict['vote'] = self.honest_ballot_gen.votes
self.honest_ballot_dict = bdict
return
def get_honest_ballots(self, etype: str) -> np.ndarray:
"""Get honest ballot data.
Parameters
----------
etype : str
Election method name.
Returns
-------
out : np.ndarray
Output ballot data array
"""
btype = votemethods.get_ballot_type(etype)
return self.honest_ballot_dict[btype]
def get_ballots(self, etype, strategies=(), result=None, ballots=None):
"""Retrieve tactical ballots.
Parameters
----------
etype : str
Election method
strategies : list of `StrategyData`
Voter strategies to apply onto ballots
result : `ElectionResult`
Previous results which can be used to calculate front runner.
Returns
-------
ballots : ndarray (v, c)
New ballots
group_index : dict
Index locations of voter groups.
"""
if len(strategies) == 0:
ballots = self.get_honest_ballots(etype)
group_index = self.votergroup.data.group_index
else:
if ballots is None:
ballots = self.get_honest_ballots(etype)
if result is None:
raise ValueError('A previous honest result must be provided for tactical ballots.')
tballot_gen = TacticalBallots(etype=etype,
strategies=strategies,
result=result,
ballots=ballots)
ballots = tballot_gen.ballots
group_index = tballot_gen.group_index
# Just save this thing might be useful for debugging.
self.tacticalballots = tballot_gen
return ballots, group_index
class Election(object):
"""
Run an Election with Voters and Candidates
Parameters
------------
voters : None, Voters, VoterGroup, or list of Voters
Voters object specifying the voter preferences and behavior.
candidate : None or Candidates
Candidates object specifying candidate preferences
seed : None
THIS PARAMETER IS NOT REALLY USED FOR NOW. IGNORE!
numwinners : int >= 1
Number of winners for the election
scoremax : int
Maximum score for ballot generation
name : str
Name of election model, used to identify different benchmark models.
save_args : bool (default True)
- If True, save all parameters input into method calls. These
parameters can be used to regenerate specific elections.
- If False, only save parameters input into `self.user_data`.
Attributes
----------
result : ElectionResult
Results storage for Election.
ballotgen : BallotGenerator
VoterBallot data
"""
candidates: Candidates
voters: VoterGroup
data: ElectionData
result: ElectionResult
strategies: Strategies
ballotgen : BallotGenerator
def __init__(self,
voters: VoterGroup=None,
candidates: Candidates=None,
strategies: Strategies=None,
seed=0,
numwinners=1,
scoremax=5,
name = '',
save_args=True,
save_records=True):
self._method_records = utilities.recorder.RecordActionCache()
self.voters: VoterGroup = None
self.candidates: Candidates = None
self.ballotgen: BallotGenerator = None
self.strategies: Strategies = StrategiesEmpty()
self.save_args = save_args
self.save_records = save_records
self.init(seed, numwinners, scoremax, name)
self.set_models(voters, candidates, strategies)
self._result_calc = ElectionResultCalc(self)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed, numwinners, scoremax, name):
"""Initialize some election properties"""
self._set_seed(seed)
self.numwinners = numwinners
self.scoremax = scoremax
self.name = name
return
def set_models(
self,
voters: Voters=None,
candidates: Candidates=None,
strategies: Strategies=None,
):
"""Set new voter or candidate model.
Parameters
----------
voters : Voters or None
New voters object.
If None, use previously inputed Voters.
candidates : Candidates or None
New candidates object.
If None, use previously inputed Candidates.
strategies : `votesim.models.spatial.Strategies`
New strategies object.
If None, use the previously inputed Strategies
"""
if voters is not None:
self.voters = voter_group(voters)
if candidates is not None:
self.candidates = candidates.build()
if self.voters is not None:
self.ballotgen = BallotGenerator(
self.voters,
self.candidates,
scoremax=self.scoremax
)
if strategies is not None:
if len(strategies) > 0:
self.strategies = strategies.build()
else:
self.strategies = strategies
return
def _set_seed(self, seed):
""" Set pseudorandom seed """
if seed is None:
self._seed = None
self._randomstate = _RandomState(None)
else:
self._seed = (seed, ELECTION_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return
def user_data(self, d=None, **kwargs):
"""Record any additional data the user wishes to record.
Parameters
----------
**d : dict
Write any keys and associated data here
"""
udict = {}
udict.update(kwargs)
if d is not None:
# d is supposed to be a dictionary. Try to update our dict with it
try:
udict.update(d)
# Maybe the user is trying to create a parameter `d`
except TypeError:
udict['d'] = d
self._user_data = udict
return
def reset(self):
"""Delete election data for the current run --
voter preferences, candidate preferences, and ballots,
Clear the kind of data that can be regenerated if desired.
Do not clear statistics.
"""
self.voters.reset()
self.candidates.reset()
def delete(a):
try:
delattr(self, a)
except AttributeError:
pass
delete('winners')
delete('ties')
delete('output')
delete('vballots')
raise NotImplementedError('This function probably doesnt work.')
return
@utilities.recorder.record_actions(
replace=True,
exclude=['ballots', 'result'])
def run(self,
etype=None,
ballots=None,
result=None,
force_honest=False) -> ElectionResult:
"""Run the election using `votemethods.eRunner`.
Parameters
----------
etype : str
Election method. Either `etype` or `method` must be input.
ballots : ndarray
Initial ballots to be used in election.
result : ElectionResult
Election, you can input honest election
using this object to reduce repetitive computation cost.
force_honest : bool
Force run of an honest election without strategy
Returns
-------
out : ElectionResult
"""
return self._run(
etype=etype,
ballots=ballots,
result=result,
force_honest=force_honest
)
def _run(self,
etype=None,
ballots=None,
result=None,
force_honest=False) -> ElectionResult:
logger.debug('Running %s, %s, %s', etype)
strategies = self.strategies.data
if force_honest:
strategies = ()
# Auto run an honest election if result is not available.
elif len(strategies) > 0 and result is None and ballots is None:
result = self._run(
etype=etype,
ballots=None,
result=None,
force_honest=True)
# Retrieve some tactical ballots from honest data.
ballots, group_index = self.ballotgen.get_ballots(
etype=etype,
strategies=strategies,
result=result,
ballots=ballots
)
# Generate a deterministic seed based on candidates and voters
runner = votemethods.eRunner(
etype=etype,
numwinners=self.numwinners,
ballots=ballots,
seed=self._tie_seed(),
# rstate=self._randomstate,
)
self.data = ElectionData(
ballots=runner.ballots,
winners=runner.winners,
ties=runner.ties,
group_index=group_index
)
self.result = self._result_calc.update(
runner=runner,
voters=self.voters.data,
candidates=self.candidates.data,
election=self.data
)
return self.result
def _tie_seed(self):
"""Generate pseudorandom seed for tie breaking."""
v = self.voters.data.pref[0,0] * 1000
c = self.candidates.data.pref[0,0] * 10000
return int(abs(v) + abs(c))
def rerun(self, d):
"""Re-run an election found in dataframe. Find the election
data from the dataframe index.
Parameters
----------
d : dict
Dictionary or Series of election data,
generated from self.dataseries() or self.dataframe().
Returns
-------
out : Election
Newly constructed election object with re-run parameters.
"""
series = d
def filterdict(d, kfilter):
new = {}
num = len(kfilter)
for k, v in d.items():
if k.startswith(kfilter):
newkey = k[num :]
new[newkey] = v
return new
filter_key = 'args.candidate.'
c_dict = filterdict(series, filter_key)
filter_key = 'args.election.'
e_dict = filterdict(series, filter_key)
filter_key = 'args.strategy.'
s_dict = filterdict(series, filter_key)
# Construct voters
vnum = len(self.voters.group)
new_voters = []
for ii in range(vnum):
filter_key = 'args.voter-%s.' % ii
v_dict = filterdict(series, filter_key)
v = type(self.voters.group[ii])()
#v = type(self.voters)()
v._method_records.reset()
v._method_records.run_dict(v_dict, v)
new_voters.append(v)
# Construct candidates
c = type(self.candidates)(voters=new_voters)
c._method_records.reset()
c._method_records.run_dict(c_dict, c)
# Construct strategies
s_dict2 = {}
for k, v in s_dict.items():
try:
if not np.isnan(v):
s_dict2[k] = v
except TypeError:
s_dict2[k] = v
slen = len(s_dict2)
if slen > 0:
s = type(self.strategies)(c.voters)
s._method_records.reset()
s._method_records.run_dict(s_dict2, s)
else:
s = None
enew = Election(voters=c.voters, candidates=c, strategies=s)
enew._method_records.run_dict(e_dict, enew)
return enew
def copy(self) -> 'Election':
"""Copy election."""
return copy.copy(self)
def save(self, name, reset=True):
"""Pickle election data.
Parameters
----------
name : str
Name of new pickle file to dump Election into.
reset : bool
If True (default), delete election data that can be regenerated.
"""
if reset:
self.reset()
with open(name, 'wb') as file1:
pickle.dump(self, file1)
return
def dataseries(self, index=None) -> pd.Series:
"""Retrieve pandas data series of output data."""
return self._result_calc.dataseries(index=index)
def dataframe(self) -> pd.DataFrame:
"""Construct data frame from results history."""
return self._result_calc.dataframe()
def append_stat(self, d: metrics.BaseStats, name='', update_docs=False):
return self._result_calc.append_stat(d=d,
name=name,
update_docs=update_docs)
def calculate_distance(voters: VoterData, candidates: CandidateData):
"""Re-calculate distance as the distance from Election may have error."""
distances = vcalcs.voter_distances(
voters=voters.pref,
candidates=candidates.pref,
weights=voters.weights,
order=voters.order,
)
return distances
class ElectionResultCalc(object):
"""
Store Election result output. Generated as attribute of Election.
This is a sort of messy back-end that does all the calculations. The
result front end is `ElectionResult`.
Parameters
----------
e : Election
Election to extract results from.
Attributes
----------
runner : :class:`~votesim.votemethods.voterunner.eRunner`
Output from election running class for the last run election.
results : dict
Results of last run election key prefixes:
- 'output.*' -- Prefix for election output results
- 'args.etype' -- Election method
- 'args.voter.*' -- Voter input arguments
- 'args.election.*' -- Election input arguments
- 'args.user.*' -- User defined input arguments
Output Specification
--------------------
For each election output keys are generated as dataframes or dataseries.
- Voter parameters are specified as `args.voter-vnum.a.func.argname`
- `vnum` = Voter group number
- `a` = Method call number (a method could be called multiple times.)
- `func` = Name of the called method
- `argname` = Name of the set parameter for the method.
- Candidate parameters are specified as `args.candidate.a.func.arg`
- User parameters are specified as `args.user.name`
- `name` is the user's inputted parameter name
"""
def __init__(self, e: Election):
self.election = e
self.save_args = e.save_args
# Store results as list of dict
self._output_history = []
return
def update(self,
runner: votemethods.eRunner,
voters: VoterData,
candidates: CandidateData,
election: ElectionData) -> ElectionResult:
"""Get election results."""
self.runner = runner
self.winners = runner.winners
self.ties = runner.ties
self.ballots = runner.ballots
self.electionStats = metrics.ElectionStats(voters=voters,
candidates=candidates,
election=election)
### Build dictionary of all arguments and output
output = {}
output.update(self._get_parameters())
output['output'] = self.electionStats.get_dict()
output = utilities.misc.flatten_dict(output, sep='.')
self.output = output
if self.election.save_records:
self._output_history.append(output)
result = ElectionResult(winners=self.winners,
ties=self.ties,
ballots=self.ballots,
runner=self.runner,
output=self.output,
output_docs=self.output_docs,
stats=self.electionStats,
scoremax=self.election.scoremax
)
return result
def _get_parameter_keys(self) -> list:
"""Retrieve election input parameter keys."""
return list(self._get_parameters().keys())
def _get_method_records(self) -> dict:
"""Retrieve records that can be used to regenerate result."""
candidates = self.election.candidates
voters = self.election.voters
strategies = self.election.strategies
election = self.election
# get voter parameters
vrecords = []
for v in voters.group:
vrecords.append(v._method_records.dict)
# get candidate parameters
crecord = candidates._method_records.dict
# get strategy parameters
if hasattr(strategies, '_method_records'):
srecord = strategies._method_records.dict
else:
srecord = {}
# get election parameters
erecord = election._method_records.dict
# Save etype and name in special parameters
params = {}
for key in erecord:
if 'run.etype' in key:
params['args.etype'] = erecord[key]
elif '.init.name' in key:
params['args.name'] = erecord[key]
# Save all method call arguments
if self.save_args:
params['args.candidate'] = crecord
if len(srecord) > 0:
params['args.strategy'] = srecord
for ii, vrecord in enumerate(vrecords):
params['args.voter-%s' % ii] = vrecord
params['args.election'] = erecord
return params
def _get_user_data(self) -> dict:
# Retrieve user data
# Determine if user data exists. If not, save default save_args
try:
userdata = self.election._user_data
if len(userdata) == 0:
userdata = {}
except AttributeError:
userdata = {}
params = {}
# Add user data to params
for key, value in userdata.items():
newkey = 'args.user.' + key
params[newkey] = value
return params
def _get_parameters(self) -> dict:
d1 = self._get_user_data()
d2 = self._get_method_records()
d1.update(d2)
return d1
# def ___get_parameters(self) -> dict:
# """Retrieve election input parameters."""
# params = {}
# candidates = self.election.candidates
# voters = self.election.voters
# election = self.election
# # get candidate parameters
# crecord = candidates._method_records.dict
# # get voter parameters
# vrecords = []
# for v in voters.group:
# vrecords.append(v._method_records.dict)
# # get election parameters
# erecord = election._method_records.dict
# # Retrieve user data
# # Determine if user data exists. If not, save default save_args
# save_args = self.save_args
# try:
# userdata = self.election._user_data
# if len(userdata) == 0:
# save_args = True
# except AttributeError:
# save_args = True
# userdata = {}
# # Add user data to params
# for key, value in userdata.items():
# newkey = 'args.user.' + key
# params[newkey] = value
# # Save etype and name in special parameters
# for key in erecord:
# if 'run.etype' in key:
# params['args.etype'] = erecord[key]
# elif '.init.name' in key:
# params['args.name'] = erecord[key]
# # Save all method call arguments
# if self.save_args or save_args:
# params['args.candidate'] = crecord
# for ii, vrecord in enumerate(vrecords):
# params['args.voter-%s' % ii] = vrecord
# params['args.election'] = erecord
# params = utilities.misc.flatten_dict(params, sep='.')
# return params
@utilities.lazy_property
def output_docs(self) -> dict:
"""Retrieve output documentation."""
docs = self.electionStats.get_docs()
docs = utilities.misc.flatten_dict(docs, sep='.')
return docs
def dataseries(self, index=None):
"""Retrieve pandas data series of output data."""
if index is None:
return pd.Series(self.output)
else:
return pd.Series(self._output_history[index])
def dataframe(self):
"""Construct data frame from results history."""
series = []
for r in self._output_history:
series.append(pd.Series(r))
df = pd.concat(series, axis=1, ignore_index=True).transpose()
df = df.reset_index(drop=True)
self._dataframe = df.infer_objects()
return df
def append_stat(self, d: metrics.BaseStats, name='', update_docs=False):
"""Append custom user stat object to the last result entry.
Parameters
----------
d : subtype of `metrics.BaseStats` or dict
Additional outputs to add to the result.
name : str
Optional, name of outputs.
"""
try:
dict1 = d._dict
docs1 = d._docs
name1 = d._name
except AttributeError:
dict1 = d
name1 = name
docs1 = {}
dict1 = {'output.' + name1 : dict1}
dict1 = utilities.misc.flatten_dict(dict1, sep='.')
result = self._output_history[-1]
for key in dict1:
if key in result:
s = 'Duplicate output key "%s" found for custom stat.' % key
raise ValueError(s)
result.update(dict1)
return
class ResultRecord(object):
"""Store election results here."""
def __init__(self):
self.output_history = []
def append(self, result: ElectionResult):
output = result.output
self.output = output
self.output_history.append(output)
return
def dataseries(self, index=None):
"""Retrieve pandas data series of output data."""
if index is None:
return pd.Series(self.output)
else:
return pd.Series(self.output_history[index])
def dataframe(self):
"""Construct data frame from results history."""
series = []
for r in self.output_history:
series.append( | pd.Series(r) | pandas.Series |
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
from mbf_nested_intervals import merge_df_intervals, merge_df_intervals_with_callback
class TestIntervals:
def test_merge_intervals(self):
df = pd.DataFrame(
[
{"chr": "1", "start": 0, "stop": 1000, "key": "a"},
{"chr": "1", "start": 850, "stop": 860, "key": "b"},
{"chr": "1", "start": 900, "stop": 1100, "key": "c"},
{"chr": "2", "start": 900, "stop": 1100, "key": "d"},
]
)
merged = merge_df_intervals(df)
should = pd.DataFrame(
[
{"index": 2, "chr": "1", "start": 0, "stop": 1100, "key": "c"},
{"index": 3, "chr": "2", "start": 900, "stop": 1100, "key": "d"},
]
).set_index("index")
should.index.name = None
assert_frame_equal(merged, should)
def test_merge_intervals2(self):
import traceback
import warnings
import sys
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file,'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
df = pd.DataFrame(
[
{"chr": "Chromosome", "start": 10, "stop": 100},
{"chr": "Chromosome", "start": 400, "stop": 450},
{"chr": "Chromosome", "start": 80, "stop": 120},
{"chr": "Chromosome", "start": 600, "stop": 700},
]
)
merged = merge_df_intervals(df)
should = pd.DataFrame(
[
{"index": 2, "chr": "Chromosome", "start": 10, "stop": 120},
{"index": 1, "chr": "Chromosome", "start": 400, "stop": 450},
{"index": 3, "chr": "Chromosome", "start": 600, "stop": 700},
]
).set_index("index")
should.index.name = None
assert_frame_equal(merged, should)
def test_merge_intervals_with_strand(self):
import traceback
import warnings
import sys
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file,'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
df = pd.DataFrame(
[
{"chr": "Chromosome", "start": 10, "stop": 100, 'strand': 1},
{"chr": "Chromosome", "start": 400, "stop": 450, 'strand': 1},
{"chr": "Chromosome", "start": 80, "stop": 120, 'strand': -1},
{"chr": "Chromosome", "start": 600, "stop": 700, 'strand': 1},
{"chr": "Chromosome", "start": 100, "stop": 140, 'strand': -1},
]
)
merged = merge_df_intervals(df)
should = pd.DataFrame(
[
{"index": 0, "chr": "Chromosome", "start": 10, "stop": 100, 'strand': 1},
{"index": 4, "chr": "Chromosome", "start": 80, "stop": 140, 'strand': -1},
{"index": 1, "chr": "Chromosome", "start": 400, "stop": 450, 'strand': 1},
{"index": 3, "chr": "Chromosome", "start": 600, "stop": 700, 'strand': 1},
]
).set_index("index")
should.index.name = None
print(merged)
| assert_frame_equal(merged, should) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
def delete_first_line(path5):
dataframe = | pd.read_csv(path5, sep=",", header=0) | pandas.read_csv |
#!/usr/bin/env python3
# Roam - Copyright 2018 <NAME>; see LICENSE in project root
import sys
import json
from io import BytesIO
import numpy as np
import pandas
import matplotlib.pyplot as plt
import bottle
from munch import Munch
import mapalgo as MAP
class AppState(object):
def __init__(self, df):
colinfo = get_colinfo(df)
procdf = preprocess(df, colinfo)
idpos = [i for i, info in enumerate(colinfo) if info.kind == "id"]
weights = np.ones(len(colinfo), dtype=float)
self._origdf = df
self._colinfo = colinfo
self._procdf = procdf
self._idpos = idpos
self._filter = np.arange(len(df))
self._type = 1
# use setter to normalize weights
self.weights = weights
self._map = None
self.update()
@property
def origdf(self):
return self._origdf
@property
def procdf(self):
return self._procdf
@property
def colinfo(self):
return self._colinfo
@property
def weights(self):
return self._weights
@property
def wdata(self):
return self._wdata
@property
def map(self):
return self._map
@property
def filter(self):
return self._filter
@filter.setter
def filter(self, indices):
if not indices:
indices = np.arange(len(self._origdf))
else:
indices = np.asarray(indices)
indices.sort()
self._filter = indices
@weights.setter
def weights(self, w):
# zero any ID weights
w = np.array(w, dtype=float)
assert len(w) == len(self._colinfo), "incorrect number of weights"
w[w < 0] = 0.0
w[self._idpos] = 0.0
wsum = np.sum(w)
if wsum < 1.0e-6:
# handle case of zero-sum weights
w[:] = 1.0
w[self._idpos] = 0.0
wsum = np.sum(w)
w /= wsum
self._weights = w
# compute weighted version of data
mappedw = np.zeros(len(self._procdf.columns))
for i, info in enumerate(self._colinfo):
for pos in info.idxpos:
mappedw[pos] = w[i]
self._wdata = self._procdf.as_matrix() * mappedw
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
def update(self):
prior = self._map
filt = self._filter
newmap = MAP.create_map_from_data("data", self._wdata[filt], type=self._type, labels=filt, prior=prior)
self._map = newmap
def get_colinfo_1(df, i, catmax=10):
colname = df.columns[i]
dtype = df.dtypes[i]
count = df[[colname]].apply(pandas.Series.nunique)[0]
mean = None
sd = None
colinfo = Munch(
pos=i,
name=colname,
nvals=count
)
if dtype == np.dtype('O'):
# presume string
if count <= catmax:
colinfo.kind = "cat"
cats = list(df[colname].dropna().unique())
cats.sort()
colinfo.cats = cats
else:
colinfo.kind = "id"
else:
colinfo.kind = "scale"
colinfo.mean = df[colname].mean()
colinfo.sd = df[colname].std(ddof=0)
return colinfo
def get_colinfo(df, catmax=10):
return [get_colinfo_1(df, i, catmax=catmax) for i in range(len(df.columns))]
def preprocess(df, colinfo):
catcols = [info.name for info in colinfo if info.kind == "cat"]
scalecols = [info.name for info in colinfo if info.kind == "scale"]
cats = pandas.get_dummies(df[catcols], columns=catcols)
cats = cats.fillna(0)
vals = df[scalecols]
vals = (vals - vals.mean()) / vals.std(ddof=0)
vals = vals.fillna(vals.mean())
merged = cats.join(vals)
# create mapping from colinfo names onto merged dataframe
for info in colinfo:
idxpos = []
for i, colname in enumerate(merged.columns):
if colname.startswith(info.name):
idxpos.append(i)
info.idxpos = idxpos
return merged
# NumpyJSONEncoder class adapted from:
# https://stackoverflow.com/questions/27050108/convert-numpy-type-to-python/27050186
class NumpyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyJSONEncoder, self).default(obj)
def jsonout(callback):
def wrapper(*args, **kw):
result = callback(*args, **kw)
if isinstance(result, pandas.DataFrame):
return result.to_json(orient='records')
return json.dumps(result, cls=NumpyJSONEncoder)
return wrapper
app = bottle.Bottle()
appState = None
@app.get("/vars", apply=jsonout)
def vars():
return appState.colinfo
@app.get("/data", apply=jsonout)
def data():
return appState.origdf
@app.get("/weights", apply=jsonout)
def weights():
return {info.name: w for info, w in zip(appState.colinfo, appState.weights)}
@app.get("/ypos", apply=jsonout)
def ypos():
N = len(appState.origdf)
ypos = np.zeros((N, 2))
ypos[appState.filter] = appState.map.y
return ypos
@app.get("/prob", apply=jsonout)
def prob():
return appState.map.prob
@app.post("/update", apply=jsonout)
def update():
wdict = bottle.request.json
assert wdict, "no weights provided"
weights = [wdict.get(info.name, 0.0) for info in appState.colinfo]
appState.weights = weights
appState.update()
return ypos()
@app.get("/filter", apply=jsonout)
def get_filter():
return appState.filter
@app.post("/filter", apply=jsonout)
def update_filter():
indices = bottle.request.json
appState.filter = indices
appState.update()
return ypos()
@app.post("/type", apply=jsonout)
def update_filter():
appState.type = bottle.request.json
appState.update()
print("AppState: {}".format(appState.type))
return ypos()
COLORS = [
"#1b9e77",
"#d95f02",
"#7570b3",
"#e7298a",
"#66a61e",
"#e6ab02",
"#a6761d",
"#666666",
]
@app.get("/graph")
def graph():
colors = [COLORS[0]] * len(appState.map.y)
colorattr = bottle.request.params.get("c")
coldict = {info.name: info for info in appState.colinfo}
if colorattr and coldict.get(colorattr):
origdf = appState.origdf
kind = coldict[colorattr].kind
if kind == "cat":
vals = origdf[colorattr].unique()
mapping = dict(zip(vals, range(len(vals))))
colors = [COLORS[mapping[c] % len(COLORS)] for c in origdf[colorattr]]
elif kind == "scale":
# LATER!
colors = [COLORS[0]] * len(appState.map.y)
io = BytesIO()
y = appState.map.y
plt.figure(figsize=(15,15))
plt.scatter(y[:,0], y[:,1], s=50, color=colors, alpha=0.6)
plt.savefig(io, format='png')
bottle.response.set_header("Content-Type", "image/png")
return io.getvalue()
# MY_DIR = os.path.abspath(os.getcwd())
# STATIC_DIR = os.path.join(MY_DIR, "web")
# @app.route("/static/<path:path>")
# def client_files(path):
# print(f"fetching path {path}")
# return bottle.static_file(path, root=STATIC_DIR)
if __name__ == '__main__':
fname = sys.argv[1]
df = | pandas.read_csv(fname, na_values='?') | pandas.read_csv |
import pandas as pd
import numpy as np
import requests
import datetime
import os.path
import pprint
import shapefile
import simplejson
import statistics
import math, sys
from urllib.parse import urlparse
from collections import defaultdict
from libs.CovidDatasets import get_public_data_base_url
from libs.us_state_abbrev import us_state_abbrev, us_fips
from libs.datasets import FIPSPopulation
from libs.enums import Intervention
from libs.functions.calculate_projections import (
get_state_projections_df,
get_county_projections_df,
)
from libs.datasets.projections_schema import OUTPUT_COLUMN_REMAP_TO_RESULT_DATA
from libs.datasets.results_schema import (
RESULT_DATA_COLUMNS_STATES,
RESULT_DATA_COLUMNS_COUNTIES,
)
from libs.constants import NULL_VALUE
# @TODO: Attempt today. If that fails, attempt yesterday.
latest = datetime.date.today() - datetime.timedelta(days=1)
def _get_interventions_df():
# TODO: read this from a dataset class
interventions_url = "https://raw.githubusercontent.com/covid-projections/covid-projections/master/src/assets/data/interventions.json"
interventions = requests.get(interventions_url).json()
return pd.DataFrame(list(interventions.items()), columns=["state", "intervention"])
def _get_abbrev_df():
# TODO: read this from a dataset class
return pd.DataFrame(
list(us_state_abbrev.items()), columns=["state", "abbreviation"]
)
county_replace_with_null = {"Unassigned": NULL_VALUE}
def _get_usa_by_county_df():
# TODO: read this from a dataset class
url = "{}/data/cases-jhu/csse_covid_19_daily_reports/{}.csv".format(
get_public_data_base_url(), latest.strftime("%m-%d-%Y")
)
raw_df = pd.read_csv(url, dtype={"FIPS": str})
raw_df["FIPS"] = raw_df["FIPS"].astype(str).str.zfill(5)
column_mapping = {
"Province_State": "Province/State",
"Country_Region": "Country/Region",
"Last_Update": "Last Update",
"Lat": "Latitude",
"Long_": "Longitude",
"Combined_Key": "Combined Key",
"Admin2": "County",
"FIPS": "State/County FIPS Code",
}
remapped_df = raw_df.rename(columns=column_mapping)
# USA only
us_df = remapped_df[(remapped_df["Country/Region"] == "US")]
jhu_column_names = [
"Province/State",
"Country/Region",
"Last Update",
"Latitude",
"Longitude",
"Confirmed",
"Recovered",
"Deaths",
"Active",
"County",
"State/County FIPS Code",
"Combined Key",
# Incident rate and people tested do not seem to be available yet
# "Incident Rate",
# "People Tested",
]
final_df = | pd.DataFrame(us_df, columns=jhu_column_names) | pandas.DataFrame |
def meanOrderFrequency(path_to_dataset):
"""
Displays the mean order frequency by utilizing the orders table.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
print('On an average, people order once every ', orders['days_since_prior_order'].mean(), 'days')
def numOrdersVsDays(path_to_dataset):
"""
Displays the number of orders and how this number varies with change in days since last order.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
order_by_date = orders.groupby(by='days_since_prior_order').count()
fig = plt.figure(figsize = [15, 7.5])
ax = fig.add_subplot()
order_by_date['order_id'].plot.bar(color = '0.75')
ax.set_xticklabels(ax.get_xticklabels(), fontsize= 15)
plt.yticks(fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_children()[7].set_color('0.1')
ax.get_children()[14].set_color('0.1')
ax.get_children()[21].set_color('0.1')
ax.get_children()[30].set_color('0.1')
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2]], visible=True)
plt.xticks(rotation = 'horizontal');
def numOrderDaysSizeBubble(path_to_dataset):
"""
Plots a bubble plot in which:
x: Days since Previous Order
y: Number of orders/1000
size: Average Size of order given it was placed on x
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
assert isinstance(path_to_dataset, str)
order_file_path = path_to_dataset + '/orders.csv'
order_product_prior_file_path = path_to_dataset + '/order_products__prior.csv'
orders = pd.read_csv(order_file_path)
order_products_prior = pd.read_csv(order_product_prior_file_path)
order_id_count_products = order_products_prior.groupby(by='order_id').count()
orders_with_count = order_id_count_products.merge(orders, on='order_id')
order_by_date = orders.groupby(by='days_since_prior_order').count()
# take above table and group by days_since_prior_order
df_mean_order_size = orders_with_count.groupby(by='days_since_prior_order').mean()['product_id']
df_mean_order_renamed = df_mean_order_size.rename('average_order_size')
bubble_plot_dataframe = pd.concat([order_by_date['order_id'], df_mean_order_renamed], axis=1)
bubble_plot_dataframe['average_order_size'].index.to_numpy()
fig = plt.figure(figsize=[15,7.5])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy(), bubble_plot_dataframe['order_id'].values, s=((bubble_plot_dataframe['average_order_size'].values/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 31, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
fig = plt.figure(figsize=[10,9])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy()[:8], bubble_plot_dataframe['order_id'].values[:8], s=((bubble_plot_dataframe['average_order_size'].values[:8]/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 8, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
def orderTimeHeatMaps(path_to_dataset):
"""
Plots the distribution of order with respect to hour of day and day of the week.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(grouped_data, xticklabels=hour_of_day,yticklabels=dow,cbar_kws={'label': 'Number Of Orders Made/1000'})
cbar = ax.collections[0].colorbar
cbar.set_ticks([0, 10000, 20000, 30000, 40000, 50000])
cbar.set_ticklabels(['0','10.0','20.0','30.0','40.0','50.0'])
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day", fontsize=15)
plt.show()
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(np.log(grouped_data), xticklabels=hour_of_day,yticklabels=dow,cbar=False)
cbar = ax.collections[0].colorbar
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day (Log Scale)", fontsize=15)
plt.show()
def generateWordCloud(path_to_dataset):
"""
Generates word cloud.
:param path_to_dataset: path to dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
from wordcloud import WordCloud
import pandas as pd
import matplotlib.pyplot as plt
product_path = path_to_dataset + "/products.csv"
aisles_path = path_to_dataset + "/aisles.csv"
departments_path = path_to_dataset + "/departments.csv"
order_product_prior_path = path_to_dataset + "/order_products__prior.csv"
df_products = | pd.read_csv(product_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), | u('z') | pandas.compat.u |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 31 12:44:45 2022
@author: Danie
DEPRICATED FILE DO NOT USE
Old biolerplate work for when stage 3 classification was just going to be selecting a set of representative
points create by ModelsA B and C. Has terrible accuracy and should absolutley not be used.
"""
import tensorflow as tf
import numpy as np
import matplotlib as plt
import os
import csv
from test_iterator import TestIterator
import pandas as pd
import plotly.express as px
import plotly
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn import svm
import random
def getLabel(base_dir, csvf, test_image):
label = 0
with open(os.path.join(base_dir, csvf), "r") as f:
reader = csv.reader(f)
for row in reader:
if row[0] == test_image+".JPG" or row[0] == test_image+".JPEG":
label = row[1]
break
return label
if __name__=="__main__":
base_dir = "E:\\Coding\\Dataset"
test_dir = "images_test"
label_csv = "test_labels.csv"
networkA = 'network_A_1'
networkB = 'network_B_1'
networkC = 'network_C_1'
base_dir = "E:\\Coding\\Dataset"
batch_size = 32
csvf = "test_labels.csv"
svc = svm.SVC()
totdf = | pd.DataFrame() | pandas.DataFrame |
### Model Training and Evaluation ###
# Author: <NAME>
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os, shutil
import re
import csv
from utils import bigrams, trigram, replace_collocation
import timeit
import pandas as pd
import string
from nltk.stem import PorterStemmer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import string
from sklearn.feature_extraction.text import CountVectorizer
from gensim.test.utils import datapath
from gensim.models import Word2Vec
from data_concatenate import *
import gensim.downloader
import pprint
from manetm import etm
pp = pprint.PrettyPrinter()
# =============================================================================
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
if not os.path.exists(f"{DATAPATH}/full_results"):
os.makedirs(f"{DATAPATH}/full_results")
# =============================================================================
# #0 Set Parameters
# =============================================================================
# Dataset parameters
embphrase_itera = 2 # Number of phrase iterations
embthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
emb_max_df = 1.0 # in a maximum of # % of documents if # is float.
emb_min_df = 1 # choose desired value for min_df // in a minimum of # documents
EMBDATASET = f"BBTSST_min{emb_min_df}_max{emb_max_df}_iter{embphrase_itera}_th{embthreshold}"
meetphrase_itera = 2
meetthreshold = "inf"
meetmax_df = 1.0
meetmin_df = 10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
sta_phrase_itera = 2
sta_threshold = "inf"
sta_max_df = 1.0
sta_min_df = 5
STADATASET = f"STATEMENT_min{sta_min_df}_max{sta_max_df}_iter{sta_phrase_itera}_th{sta_threshold}"
# Skipgram parameters
mincount = 2
d_sg = 1
vectorsize = 300
iters = 100
cpus = 16
neg_samples = 10
windowsize = 4
# Activate code
d_construct = False
d_estemb = False
d_train = False
# =============================================================================
# #1 Data Preparation
# =============================================================================
if d_construct:
print("*" * 80)
print("Build datasets")
build_embdata(emb_max_df,emb_min_df,embphrase_itera,embthreshold,EMBDATASET)
build_meeting(meetmax_df,meetmin_df,meetphrase_itera,meetthreshold,MEEETDATA)
build_statement_data(sta_max_df,sta_min_df,sta_phrase_itera,sta_threshold,STADATASET)
print("*" * 80)
print("Datasets Construction Completed")
print("*" * 80)
print("\n")
# =============================================================================
# #2 Train Word Embeddings
# =============================================================================
if d_estemb:
# Run Skipgram
print(f"Run model: {EMBDATASET}\n")
sentences = pd.read_pickle(f"{DATAPATH}/data/{EMBDATASET}/corpus.pkl")
model = gensim.models.Word2Vec(sentences, min_count = mincount, sg = d_sg, vector_size = vectorsize, epochs = iters, workers = cpus, negative = neg_samples, window = windowsize)
model.save(f"{DATAPATH}/word2vecmodels/{EMBDATASET}")
# Write the embeddings to a file
with open(f"{DATAPATH}/embeddings/{EMBDATASET}_emb", 'w') as f:
for v in model.wv.index_to_key:
vec = list(model.wv[v])
f.write(v + ' ')
vec_str = ['%.9f' % val for val in vec]
vec_str = " ".join(vec_str)
f.write(vec_str + '\n')
print("*" * 80)
print(f"Embedding Training Completed")
print("*" * 80)
print("\n\n")
# =============================================================================
## #4 TRAIN TOPIC MODELS
# =============================================================================
# =============================================================================
## SPEAKERDATA - Pre-Trained Emb.
# speaker_ckpt = etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 150, num_topics = 10, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{speaker_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
#
# print(f"Output the topic distribution: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{speaker_ckpt}", train_embeddings = 0)
#
# =============================================================================
## MEETINGS - Pre-Trained Emb.
if d_train:
meeting_ckpt = etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
batch_size = 1000, epochs = 2000, num_topics = 10, rho_size = 300,
emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
train_embeddings = 0, lr = 0.005, lr_factor=4.0,
mode = 'train', optimizer = 'adam',
seed = 2019, enc_drop = 0.0, clip = 0.0,
nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
load_from = "", tc = 1, td = 1)
print(f"Evaluate model: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'eval', load_from = f"{meeting_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
print(f"Output the topic distribution: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'retrieve',load_from = f"{meeting_ckpt}", train_embeddings = 0)
# =============================================================================
## #5 OUTPUT DATA
# =============================================================================
# =============================================================================
# ## SPEAKERDATA
# raw_df = pd.read_pickle(f"raw_data/{SPEAKERDATA}.pkl")
#
# idx_df = pd.read_pickle(f'{OUTPATH}/{SPEAKERDATA}/original_indices.pkl')
# idx_df = idx_df.set_index(0)
# idx_df["d"] = 1
#
# data = pd.concat([idx_df,raw_df],axis=1)
# data_clean = data[data["d"]==1].reset_index()
# dist_df = pd.read_pickle(f'{speaker_ckpt}tpdist.pkl')
#
# full_data = pd.concat([data_clean,dist_df],axis=1)
# full_data.drop(columns=["content","d"],inplace=True)
# full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
# full_data["start_date"] = pd.to_datetime(full_data["start_date"])
# full_data.to_stata(f"{DATAPATH}/full_results/{SPEAKERDATA}.dta",convert_dates={"start_date":"td"})
#
# =============================================================================
### MEETING ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEEETDATA}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEEETDATA}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckpt}tpdist.pkl')
full_data = pd.concat([data_clean,dist_df],axis=1)
full_data.drop(columns=["content"],inplace=True)
full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
full_data["date"] = full_data["start_date"]
full_data.to_stata(f"{DATAPATH}/full_results/{MEEETDATA}.dta",convert_dates={"date":"td"})
full_data.to_pickle(f"{DATAPATH}/full_results/{MEEETDATA}.pkl")
### MEETING SAMPLED ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEETDATASAMPLE}.pkl")
idx_df = | pd.read_pickle(f'{OUTPATH}/{MEETDATASAMPLE}/original_indices.pkl') | pandas.read_pickle |
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of acccessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": pd.Series(["a", pd.NA]),
"str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["str_no_nans"].dtype == "category"
assert df["str_nan"].dtype == "category"
assert df["str_NA"].dtype == "category"
assert df["str_NA_specified"].dtype == "category"
def test_float_dtype_inference_on_init():
df = pd.DataFrame(
{
"floats_no_nans": pd.Series([1.1, 2.2]),
"floats_nan": pd.Series([1.1, np.nan]),
"floats_NA": pd.Series([1.1, pd.NA]),
"floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["floats_no_nans"].dtype == "float64"
assert df["floats_nan"].dtype == "float64"
assert df["floats_NA"].dtype == "category"
assert df["floats_nan_specified"].dtype == "float64"
def test_datetime_dtype_inference_on_init():
df = pd.DataFrame(
{
"date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
"date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
"date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
"date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
"date_NA_specified": pd.Series(
[pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
),
}
)
df.ww.init()
assert df["date_no_nans"].dtype == "datetime64[ns]"
assert df["date_nan"].dtype == "datetime64[ns]"
assert df["date_NA"].dtype == "datetime64[ns]"
assert df["date_NaT"].dtype == "datetime64[ns]"
assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
df = pd.DataFrame(
{
"index": [0, 1, 2],
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
"mdy_special": pd.Series(
["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
),
}
)
df.ww.init(
name="df_name",
logical_types={
"ymd_special": Datetime(datetime_format="%Y~%m~%d"),
"mdy_special": Datetime(datetime_format="%m~%d~%Y"),
"dates": Datetime,
},
time_index="ymd_special",
)
assert df["dates"].dtype == "datetime64[ns]"
assert df["ymd_special"].dtype == "datetime64[ns]"
assert df["mdy_special"].dtype == "datetime64[ns]"
assert df.ww.time_index == "ymd_special"
assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
df.ww.set_time_index("mdy_special")
assert df.ww.time_index == "mdy_special"
df = pd.DataFrame(
{
"mdy_special": pd.Series(
["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["mdy_special"].dtype == "category"
df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
assert df["mdy_special"].dtype == "datetime64[ns]"
df.ww.set_time_index("mdy_special")
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
df = pd.DataFrame(
{
"delta_no_nans": (
pd.Series([pd.to_datetime("2020-09-01")] * 2)
- pd.to_datetime("2020-07-01")
),
"delta_nan": (
pd.Series([pd.to_datetime("2020-09-01"), np.nan])
- pd.to_datetime("2020-07-01")
),
"delta_NaT": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
- pd.to_datetime("2020-07-01")
),
"delta_NA_specified": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
- pd.to_datetime("2020-07-01")
),
}
)
df.ww.init()
assert df["delta_no_nans"].dtype == "timedelta64[ns]"
assert df["delta_nan"].dtype == "timedelta64[ns]"
assert df["delta_NaT"].dtype == "timedelta64[ns]"
assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
pd.Series(["a", pd.NaT, "c"], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=["a", "b", "c"]),
PostalCode,
SubRegionCode,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if isclass(logical_type):
logical_type = logical_type()
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert df.ww.columns[column_name].logical_type == logical_type
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
df = latlong_df.loc[:, [column_name]]
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, LatLong)
assert df[column_name].dtype == LatLong.primary_dtype
df_pandas = to_pandas(df[column_name])
expected_val = (3, 4)
if _is_koalas_dataframe(latlong_df):
expected_val = [3, 4]
assert df_pandas.iloc[-1] == expected_val
def test_sets_string_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
]
logical_types = [
Address,
Filepath,
PersonFullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_boolean_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_types = [Boolean, BooleanNullable]
for series in series_list:
for logical_type in logical_types:
if series.isnull().any() and logical_type == Boolean:
continue
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_int64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer, IntegerNullable, Age, AgeNullable]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if series.isnull().any() and logical_type in [Integer, Age]:
continue
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_float64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_types = [Double, AgeFractional]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_datetime64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["2020-01-01", "2020-01-02", "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", None, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", np.nan, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", pd.NA, "2020-01-03"], name=column_name),
pd.Series(
["2020-01-01", pd.NaT, "2020-01-03"], name=column_name, dtype="object"
),
]
logical_type = Datetime
for series in series_list:
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_invalid_dtype_casting():
column_name = "test_series"
# Cannot cast a column with pd.NA to Double
series = pd.Series([1.1, pd.NA, 3], name=column_name)
ltypes = {
column_name: Double,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
# Cannot cast Datetime to Double
df = pd.DataFrame({column_name: ["2020-01-01", "2020-01-02", "2020-01-03"]})
df.ww.init(logical_types={column_name: Datetime})
err_msg = (
"Error converting datatype for test_series from type datetime64[ns] to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
with pytest.raises(TypeConversionError, match=re.escape(err_msg)):
df.ww.set_types(logical_types={column_name: Double})
# Cannot cast invalid strings to integers
series = pd.Series(["1", "two", "3"], name=column_name)
ltypes = {
column_name: Integer,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"int64. Please confirm the underlying data is consistent with logical type Integer."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
def test_underlying_index_set_no_index_on_init(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
input_index = pd.Int64Index([99, 88, 77, 66])
schema_df = sample_df.copy()
schema_df.index = input_index.copy()
pd.testing.assert_index_equal(input_index, schema_df.index)
schema_df.ww.init()
assert schema_df.ww.index is None
pd.testing.assert_index_equal(input_index, schema_df.index)
sorted_df = schema_df.ww.sort_values("full_name")
assert sorted_df.ww.index is None
pd.testing.assert_index_equal(pd.Int64Index([88, 77, 99, 66]), sorted_df.index)
def test_underlying_index_set(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
# Sets underlying index at init
schema_df = sample_df.copy()
schema_df.ww.init(index="full_name")
assert "full_name" in schema_df.columns
assert schema_df.index.name is None
assert (schema_df.index == schema_df["full_name"]).all()
# Sets underlying index on update
schema_df = sample_df.copy()
schema_df.ww.init(index="id")
schema_df.ww.set_index("full_name")
assert schema_df.ww.index == "full_name"
assert "full_name" in schema_df.columns
assert (schema_df.index == schema_df["full_name"]).all()
assert schema_df.index.name is None
# confirm removing Woodwork index doesn't change underlying index
schema_df.ww.set_index(None)
assert schema_df.ww.index is None
assert (schema_df.index == schema_df["full_name"]).all()
def test_underlying_index_reset(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
specified_index = pd.Index
unspecified_index = pd.RangeIndex
sample_df.ww.init()
assert type(sample_df.index) == unspecified_index
sample_df.ww.set_index("full_name")
assert type(sample_df.index) == specified_index
copied_df = sample_df.ww.copy()
warning = "Index mismatch between DataFrame and typing information"
with pytest.warns(TypingInfoMismatchWarning, match=warning):
copied_df.ww.reset_index(drop=True, inplace=True)
assert copied_df.ww.schema is None
assert type(copied_df.index) == unspecified_index
sample_df.ww.set_index(None)
assert type(sample_df.index) == specified_index
# Use pandas operation to reset index
reset_df = sample_df.ww.reset_index(drop=True, inplace=False)
assert type(sample_df.index) == specified_index
assert type(reset_df.index) == unspecified_index
sample_df.ww.reset_index(drop=True, inplace=True)
assert type(sample_df.index) == unspecified_index
def test_underlying_index_unchanged_after_updates(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
sample_df.ww.init(index="full_name")
assert "full_name" in sample_df
assert sample_df.ww.index == "full_name"
assert (sample_df.index == sample_df["full_name"]).all()
copied_df = sample_df.ww.copy()
dropped_df = copied_df.ww.drop("full_name")
assert "full_name" not in dropped_df
assert dropped_df.ww.index is None
assert (dropped_df.index == sample_df["full_name"]).all()
selected_df = copied_df.ww.select("Integer")
assert "full_name" not in dropped_df
assert selected_df.ww.index is None
assert (selected_df.index == sample_df["full_name"]).all()
iloc_df = copied_df.ww.iloc[:, 2:]
assert "full_name" not in iloc_df
assert iloc_df.ww.index is None
assert (iloc_df.index == sample_df["full_name"]).all()
loc_df = copied_df.ww.loc[:, ["id", "email"]]
assert "full_name" not in loc_df
assert loc_df.ww.index is None
assert (loc_df.index == sample_df["full_name"]).all()
subset_df = copied_df.ww[["id", "email"]]
assert "full_name" not in subset_df
assert subset_df.ww.index is None
assert (subset_df.index == sample_df["full_name"]).all()
reset_tags_df = sample_df.ww.copy()
reset_tags_df.ww.reset_semantic_tags("full_name", retain_index_tags=False)
assert reset_tags_df.ww.index is None
assert (reset_tags_df.index == sample_df["full_name"]).all()
remove_tags_df = sample_df.ww.copy()
remove_tags_df.ww.remove_semantic_tags({"full_name": "index"})
assert remove_tags_df.ww.index is None
assert (remove_tags_df.index == sample_df["full_name"]).all()
set_types_df = sample_df.ww.copy()
set_types_df.ww.set_types(
semantic_tags={"full_name": "new_tag"}, retain_index_tags=False
)
assert set_types_df.ww.index is None
assert (set_types_df.index == sample_df["full_name"]).all()
popped_df = sample_df.ww.copy()
popped_df.ww.pop("full_name")
assert popped_df.ww.index is None
assert (popped_df.index == sample_df["full_name"]).all()
def test_accessor_already_sorted(sample_unsorted_df):
if _is_dask_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Dask input")
if _is_koalas_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Koalas input")
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(name="schema", index="id", time_index="signup_date")
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
sorted_df = (
to_pandas(sample_unsorted_df)
.sort_values(["signup_date", "id"])
.set_index("id", drop=False)
)
sorted_df.index.name = None
pd.testing.assert_frame_equal(
sorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(
name="schema", index="id", time_index="signup_date", already_sorted=True
)
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
unsorted_df = to_pandas(sample_unsorted_df.set_index("id", drop=False))
unsorted_df.index.name = None
pd.testing.assert_frame_equal(
unsorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
def test_ordinal_with_order(sample_series):
if _is_koalas_series(sample_series) or _is_dask_series(sample_series):
pytest.xfail(
"Fails with Dask and Koalas - ordinal data validation not compatible"
)
ordinal_with_order = Ordinal(order=["a", "b", "c"])
schema_df = pd.DataFrame(sample_series)
schema_df.ww.init(logical_types={"sample_series": ordinal_with_order})
column_logical_type = schema_df.ww.logical_types["sample_series"]
assert isinstance(column_logical_type, Ordinal)
assert column_logical_type.order == ["a", "b", "c"]
schema_df = | pd.DataFrame(sample_series) | pandas.DataFrame |
#%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import phd.viz
import phd.stats
import pickle
colors, palette = phd.viz.phd_style()
data = pd.read_csv('../../data/ch2_induction/RazoMejia_2018.csv', comment='#')
params = | pd.read_csv('../../data/ch2_induction/RazoMejia_KaKi_estimates.csv') | pandas.read_csv |
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.figure as figure
from ts_charting.figure import process_series
class Testprocess_data(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_already_aligned(self):
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
plot_series = process_series(series, plot_index)
tm.assert_almost_equal(series, plot_series)
tm.assert_almost_equal(plot_series.index, plot_index)
def test_partial_plot(self):
"""
Test plotting series that is a subset of plot_index.
Should align and fill with nans
"""
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
series = series[:50] # only first 50
plot_series = process_series(series, plot_index)
# have same index
| tm.assert_almost_equal(plot_series.index, plot_index) | pandas.util.testing.assert_almost_equal |
import numpy as np
import pandas as pd
# 1. load dataset
ratings = pd.read_csv('chapter02/data/movie_rating.csv')
movie_ratings = pd.pivot_table(
ratings,
values='rating',
index='title',
columns='critic'
)
# 2. calculate similarity
def calcualte_norm(u):
norm_u = 0.0
for ui in u:
if np.isnan(ui):
continue
norm_u += (ui ** 2)
return np.sqrt(norm_u)
def calculate_cosine_similarity(u, v):
norm_u = calcualte_norm(u)
norm_v = calcualte_norm(v)
denominator = norm_u * norm_v
numerator = 0.0
for ui, vi in zip(u, v):
if np.isnan(ui) or np.isnan(vi):
continue
numerator += (ui * vi)
similarity = numerator / denominator
return similarity
titles = movie_ratings.index
sim_items = pd.DataFrame(0, columns=titles, index=titles, dtype=float)
for src in titles:
for dst in titles:
src_vec = movie_ratings.loc[src, :].values
dst_vec = movie_ratings.loc[dst, :].values
similarity = calculate_cosine_similarity(src_vec, dst_vec)
sim_items.loc[src, dst] = similarity
print(sim_items)
# 3. Make Prediction & Recommendation
user_id = 5
ratings_critic = movie_ratings.loc[:, [movie_ratings.columns[user_id]]]
ratings_critic.columns = ['rating']
titles_na_critic = ratings_critic[pd.isna(ratings_critic.rating)].index
ratings_t = ratings.loc[ratings.critic == movie_ratings.columns[user_id]]
ratings_t = ratings_t.reset_index(drop=True)
x = sim_items.loc[:, titles_na_critic]
ratings_t = pd.merge(ratings_t, x, on='title')
print(ratings_t)
result_dict = {'title': list(), 'rating': list(), 'similarity': list()}
for row in ratings_t.iterrows():
for title in titles_na_critic:
result_dict['title'].append(title)
result_dict['rating'].append(row[1]['rating'])
result_dict['similarity'].append(row[1][title])
result = | pd.DataFrame(result_dict) | pandas.DataFrame |
import datetime
import functools
from decimal import Decimal
import pandas as pd
from dateutil.parser import parse as time_parser
from enums import BuySell
class LocalTradeRecord:
def __init__(self):
self.path = "data/trade_record.csv"
# number, stock_id, buy_time, sell_time, buy_price, sell_price, volumn, buy_cost, sell_cost, revenue
self.df = pd.read_csv(self.path,
dtype={0: int, 1: str, 2: str, 3: str, 4: str, 5: str, 6: float, 7: str, 8: str, 9: str})
# 交易紀錄索引值
def getLastNumber(self):
last_row = list(self.df.iloc[-1])
return last_row[0]
def getLastBuyTime(self, stock_id):
sub_df = self.df[self.df["stock_id"] == stock_id].copy()
sub_df.sort_values(by=["number", "buy_time"], inplace=True)
last_buy = list(sub_df.iloc[-1])[2]
return time_parser(last_buy)
def saveTradeRecord(self, stock_id, buy_time: datetime.datetime, sell_time: datetime.datetime, buy_price: Decimal,
sell_price: Decimal, volumn: int, buy_cost: Decimal, sell_cost: Decimal, revenue: Decimal):
"""
:param stock_id:
:param buy_time:
:param sell_time:
:param buy_price:
:param sell_price:
:param volumn:
:param buy_cost:
:param sell_cost:
:param revenue:
:return:
"""
number = self.getLastNumber() + 1
data = {"number": number,
"stock_id": stock_id,
"buy_time": str(buy_time.date()),
"sell_time": str(sell_time.date()),
"buy_price": str(buy_price),
"sell_price": str(sell_price),
"volumn": volumn,
"buy_cost": str(buy_cost),
"sell_cost": str(sell_cost),
"revenue": str(revenue)}
self.df = self.df.append(data, ignore_index=True)
self.df.to_csv(self.path, index=False)
return data
# # 移除庫存
# self.api.removeInventory(guid=guid)
#
# # TODO: 寫出交易紀錄,並更新資金(funds.csv)
# self.local_capital.allocateRevenue(deal_time=sell_time, remark=str(number), trade_revenue=trade_revenue)
# # self.api.recordTrading(stock_id=stock_id,
# # buy_price=str(buy_price),
# # sell_price=str(sell_price),
# # vol=buy_volumn,
# # buy_time=buy_time,
# # sell_time=sell_time,
# # buy_cost=str(buy_cost),
# # sell_cost=str(sell_cost),
# # revenue=str(revenue - buy_cost - sell_cost))
#
# self.logger.info(f"record: {record}", extra=self.extra)
# f.write(record)
def recordDividend(self, stock_id: str, revenue: str, pay_time: datetime.datetime = datetime.datetime.today()):
last_buy = self.getLastBuyTime(stock_id=stock_id)
buy_time = last_buy.strftime("%Y-%m-%d")
sell_time = pay_time.strftime("%Y-%m-%d")
number = self.getLastNumber() + 1
data = {"number": number,
"stock_id": stock_id,
"buy_time": buy_time,
"sell_time": sell_time,
"buy_price": "0",
"sell_price": "0",
"volumn": 0,
"buy_cost": "0",
"sell_cost": "0",
"revenue": revenue}
self.df = self.df.append(data, ignore_index=True)
self.df.to_csv(self.path, index=False)
return data
def renumber(self):
n_data = len(self.df)
numbers = list(range(1, n_data + 1))
self.df["number"] = numbers
self.df.to_csv(self.path, index=False)
def sortOperates(operates):
"""
將操作做排序,排序優先順序為: 日期(越早越前) -> 操作類型(buy 優先,再來才是 sell)
xxx_operate -> [datetime, buy/sell, cost/income]
:param operates: 所有操作
:return:
"""
def compareOperates(op1, op2):
"""
sorted()也是一個高階函式,它可以接收一個比較函式來實現自定義排序,
比較函式的定義是,傳入兩個待比較的元素 x, y,
如果 x 應該排在 y 的前面,返回 -1,
如果 x 應該排在 y 的後面,返回 1。
如果 x 和 y 相等,返回 0。
def customSort(x, y):
if x > y:
return -1
if x < y:
return 1
return 0
print(sorted([2,4,5,7,3], key=functools.cmp_to_key(customSort)))
-> [7, 5, 4, 3, 2]
:param op1: 請求 1
:param op2: 請求 2
:return:
"""
# datetime, buy/sell, cost/income
time1, buy_sell1, _ = op1
time2, buy_sell2, _ = op2
# 時間越早排越前面
if time1 < time2:
return -1
elif time1 > time2:
return 1
# 數量少的排前面
if buy_sell1.value < buy_sell2.value:
return -1
# 數量多的排後面
elif buy_sell1.value > buy_sell2.value:
return 1
else:
return 0
# 透過自定義規則函式 compareRequests 來對 requests 來進行排序
return sorted(operates, key=functools.cmp_to_key(compareOperates))
def evaluateTradingPerformance():
path = "data/trade_record.csv"
# number,stock_id,buy_time,sell_time,buy_price,sell_price,volumn,buy_cost,sell_cost,revenue
df = pd.read_csv(path)
df["buy_time"] = pd.to_datetime(df["buy_time"])
df["sell_time"] = pd.to_datetime(df["sell_time"])
# print(df)
# 排除使用策略時的交易
# df = df[df["sell_time"] > datetime.datetime(2021, 1, 1)]
# 只使用策略時的交易
df = df[df["sell_time"] > datetime.datetime(2021, 1, 1)]
# TODO: datetime buy/sell cost/income
n_row = len(df)
operates = []
for r in range(n_row):
row = df.iloc[r, :]
(number, stock_id, buy_time, sell_time,
buy_price, sell_price, volumn, buy_cost, sell_cost, revenue) = row.values
operates.append([buy_time, BuySell.Buy, buy_cost])
# operates.append([buy_time, "buy", buy_cost])
# sell_cost 在進來前就被扣掉,應該可以忽略
operates.append([sell_time, BuySell.Sell, buy_cost + revenue])
# operates.append([sell_time, "sell", buy_cost + revenue])
# print(operates)
operates = sortOperates(operates)
# print(operates)
input_funds = 0
funds = 0
for operate in operates:
_, op, fund = operate
if op == BuySell.Buy:
if fund > funds:
gap = fund - funds
input_funds += gap
funds += gap
print(f"資金不足,增資 {gap} 元, funds: {funds}, input_funds: {input_funds}")
funds -= fund
print(f"購買花費 {fund} 元, funds: {funds}, input_funds: {input_funds}")
else:
funds += fund
print(f"售出收入 {fund} 元, funds: {funds}, input_funds: {input_funds}")
return_rate = funds / input_funds
during = operates[-1][0] - operates[0][0]
n_year = during / datetime.timedelta(days=365.25)
year_index = 1.0 / n_year
annually_return_rate = pow(return_rate, year_index)
print(f"{during.days} days, 報酬率: {return_rate}, 年報酬率: {annually_return_rate}")
# 交易紀錄索引值
def getLastNumber():
path = "data/trade_record.csv"
with open(path, "r") as file:
for line in file:
pass
content = line.split(",")
number = content[0]
return int(number)
def renumber():
path = "data/trade_record.csv"
df = | pd.read_csv(path,
dtype={0: int, 1: str, 2: str, 3: str, 4: str, 5: str, 6: str, 7: str, 8: str, 9: str}) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype= | pd.Int64Dtype() | pandas.Int64Dtype |
"""Function that returns data from field AWS
"""
# External modules
import sys, os, glob, json
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.backends.backend_pdf import PdfPages
from pandas.plotting import register_matplotlib_converters
import math
import time
from pathlib import Path
from tqdm import tqdm
import logging
import coloredlogs
# Locals
dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.utils.settings import config
def get_field(loc="schwarzsee19"):
with open("data/common/constants.json") as f:
CONSTANTS = json.load(f)
SITE, FOLDER = config(loc)
if loc == "guttannen22":
cols_old = [
"TIMESTAMP",
"T_probe_Avg",
"RH_probe_Avg",
"amb_press_Avg",
"WS",
"SnowHeight",
"SW_IN",
"SW_OUT",
"LW_IN",
"LW_OUT",
"H",
"Tice_Avg(1)",
"Tice_Avg(2)",
"Tice_Avg(3)",
"Tice_Avg(4)",
"Tice_Avg(5)",
"Tice_Avg(6)",
"Tice_Avg(7)",
"Tice_Avg(8)",
]
cols_new = ["time", "temp", "RH", "press", "wind", "snow_h", "SW_global", "SW_out", "LW_in", "LW_out",
"Qs_meas", "T_ice_1", "T_ice_2", "T_ice_3", "T_ice_4", "T_ice_5","T_ice_6","T_ice_7","T_ice_8"]
cols_dict = dict(zip(cols_old, cols_new))
path = FOLDER["raw"] + "CardConvert/"
all_files = glob.glob(path + "*.dat")
li = []
for file in all_files:
df = pd.read_csv(
file,
sep=",",
skiprows=[0,2,3],
parse_dates=["TIMESTAMP"],
)
df = df[cols_old]
df = df.rename(columns=cols_dict)
for col in df.columns:
if col != 'time':
df[col] = df[col].astype(float)
df = df.round(2)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
df = df.set_index("time").sort_index()
df = df[SITE["start_date"] :]
df = df.reset_index()
"""Correct data errors"""
df= df.replace("NAN", np.NaN)
df = df.set_index("time").resample("H").mean().reset_index()
df["missing_type"] = "-"
df.loc[df.wind > 50, "wind"] = np.NaN
df.loc[df.Qs_meas > 300, "Qs_meas"] = np.NaN
df.loc[df.Qs_meas < -300, "Qs_meas"] = np.NaN
df.loc[:, "Qs_meas"] = df["Qs_meas"].interpolate()
df["alb"] = df["SW_out"]/df["SW_global"]
df.loc[df.alb > 1, "alb"] = np.NaN
df.loc[df.alb < 0, "alb"] = np.NaN
df.loc[:, "alb"] = df["alb"].interpolate()
df['ppt'] = df.snow_h.diff()*10*CONSTANTS['RHO_S']/CONSTANTS['RHO_W'] # mm of snowfall w.e. in one hour
df.loc[df.ppt<1, "ppt"] = 0 # Assuming 1 mm error
print(df['ppt'].describe())
# print(df.time[df.T_ice_8.isna()].values[0])
df['T_bulk_meas'] = (df["T_ice_2"] + df["T_ice_3"] + df["T_ice_4"]+ df["T_ice_5"]+ df["T_ice_6"]+df["T_ice_7"])/6
# df['T_bulk_meas'] = (df["T_ice_2"] + df["T_ice_3"] + df["T_ice_4"]+ df["T_ice_5"]+ df["T_ice_6"])/5
df['T_G'] = df["T_ice_1"]
cols = [
"time",
"temp",
"RH",
"wind",
"SW_global",
"alb",
"press",
"missing_type",
"LW_in",
"Qs_meas",
# "ppt",
"snow_h",
"T_bulk_meas",
"T_G",
]
df_out = df[cols]
if df_out.isna().values.any():
print(df_out.isna().sum())
df_out.to_csv(FOLDER["input"] + "field.csv", index=False)
fig, ax = plt.subplots()
x = df.time
# ax.plot(x,df["T_ice_7"])
# ax.plot(x,df["T_ice_6"])
# ax.plot(x,df["T_ice_8"] - df["T_ice_7"])
# ax.plot(x,df["T_ice_7"] - df["T_ice_6"])
# ax.plot(x,df["T_ice_6"] - df["T_ice_5"])
ax.plot(x,df["T_ice_5"] - df["T_ice_4"])
ax.plot(x,df["T_ice_4"] - df["T_ice_3"])
ax.plot(x,df["T_ice_3"] - df["T_ice_2"])
# ax.plot(x,df["T_ice_3"])
ax.set_ylim([-3,0.1])
ax.legend()
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b %d"))
ax.xaxis.set_minor_locator(mdates.DayLocator())
fig.autofmt_xdate()
plt.savefig(
FOLDER['fig'] + "temps.png",
bbox_inches="tight",
dpi=300,
)
plt.clf()
return df_out
if loc == "gangles21":
col_list = [
"TIMESTAMP",
"AirTC_Avg",
"RH",
"WS",
]
cols = ["temp", "RH", "wind"]
df_in = pd.read_csv(
FOLDER["raw"] + "/Gangles_Table15Min.dat",
sep=",",
skiprows=[0, 2, 3, 4],
parse_dates=["TIMESTAMP"],
)
df_in = df_in[col_list]
df_in.rename(
columns={
"TIMESTAMP": "time",
"AirTC_Avg": "temp",
"RH_probe_Avg": "RH",
"WS": "wind",
},
inplace=True,
)
df_in1 = pd.read_csv(
FOLDER["raw"] + "/Gangles_Table60Min.dat",
sep=",",
skiprows=[0, 2, 3],
parse_dates=["TIMESTAMP"],
)
df_in1.rename(
columns={
"TIMESTAMP": "time",
"BP_mbar": "press", # mbar same as hPa
},
inplace=True,
)
for col in df_in1:
if col != "time":
df_in1[col] = pd.to_numeric(df_in1[col], errors="coerce")
df_in = df_in.set_index("time")
df_in1 = df_in1.set_index("time")
df_in1 = df_in1.reindex(
pd.date_range(df_in1.index[0], df_in1.index[-1], freq="15Min"),
fill_value=np.NaN,
)
df_in = df_in.replace("NAN", np.NaN)
df_in1 = df_in1.replace("NAN", np.NaN)
df_in1 = df_in1.resample("15Min").interpolate("linear")
df_in.loc[:, "press"] = df_in1["press"]
df_in = df_in.replace("NAN", np.NaN)
if df_in.isnull().values.any():
print("Warning: Null values present")
print(df_in[cols].isnull().sum())
df_in = df_in.round(3)
df_in = df_in.reset_index()
df_in.rename(columns={"index": "time"},inplace=True,)
start_date = datetime(2020, 12, 14)
df_in = df_in.set_index("time")
df_in = df_in[start_date:]
df1 = pd.read_csv(
FOLDER["raw"] + "/HIAL_input_field.csv",
sep=",",
parse_dates=["When"],
)
df1 = df1.rename(columns={"When": "time"})
df = df_in
df1 = df1.set_index("time")
cols = ["SW_global"]
for col in cols:
df.loc[:, col] = df1[col]
df = df.reset_index()
df = df[df.columns.drop(list(df.filter(regex="Unnamed")))]
df = df.dropna()
# df.to_csv("outputs/" + loc + "_input_field.csv")
mask = df["SW_global"] < 0
mask_index = df[mask].index
df.loc[mask_index, "SW_global"] = 0
# diffuse_fraction = 0
# df["SW_diffuse"] = diffuse_fraction * df.SW_global
# df["SW_direct"] = (1-diffuse_fraction)* df.SW_global
df = df.set_index("time").resample("H").mean().reset_index()
df["ppt"] = 0
df["missing_type"] = "-"
# df["cld"] = 0
df.to_csv(FOLDER["input"] + "field.csv")
return df
if loc == "guttannen20":
df_in = pd.read_csv(
FOLDER["raw"] + "field.txt",
header=None,
encoding="latin-1",
skiprows=7,
sep="\\s+",
index_col=False,
names=[
"Date",
"Time",
"Discharge",
"Wind Direction",
"Wind Speed",
"Maximum Wind Speed",
"Temperature",
"Humidity",
"Pressure",
"Pluviometer",
],
)
types_dict = {
"Date": str,
"Time": str,
"Discharge": float,
"Wind Direction": float,
"Wind Speed": float,
"Temperature": float,
"Humidity": float,
"Pressure": float,
"Pluviometer": float,
}
for col, col_type in types_dict.items():
df_in[col] = df_in[col].astype(col_type)
df_in["time"] = pd.to_datetime(df_in["Date"] + " " + df_in["Time"])
df_in["time"] = pd.to_datetime(df_in["time"], format="%Y.%m.%d %H:%M:%S")
df_in = df_in.drop(["Pluviometer", "Date", "Time"], axis=1)
df_in = df_in.set_index("time").resample("H").mean().reset_index()
mask = (df_in["time"] >= SITE["start_date"]) & (
df_in["time"] <= SITE["end_date"]
)
df_in = df_in.loc[mask]
df_in = df_in.reset_index()
days = pd.date_range(start=SITE["start_date"], end=SITE["end_date"], freq="H")
days = pd.DataFrame({"time": days})
df = pd.merge(
df_in[
[
"time",
"Discharge",
"Wind Speed",
"Temperature",
"Humidity",
"Pressure",
]
],
days,
on="time",
)
df = df.round(3)
# CSV output
df.rename(
columns={
"Wind Speed": "wind",
"Temperature": "temp",
"Humidity": "RH",
"Pressure": "press",
},
inplace=True,
)
logger.info(df_in.head())
logger.info(df_in.tail())
df.to_csv(FOLDER["input"] + "field.csv")
if loc == "guttannen21":
df_in = pd.read_csv(
FOLDER["raw"] + "field.txt",
header=None,
encoding="latin-1",
skiprows=7,
sep="\\s+",
names=[
"Date",
"Time",
"Wind Direction",
"Wind Speed",
"Maximum Wind Speed",
"Temperature",
"Humidity",
"Pressure",
"Pluviometer",
],
)
types_dict = {
"Date": str,
"Time": str,
"Wind Direction": float,
"Wind Speed": float,
"Temperature": float,
"Humidity": float,
"Pressure": float,
"Pluviometer": float,
}
for col, col_type in types_dict.items():
df_in[col] = df_in[col].astype(col_type)
df_in["time"] = pd.to_datetime(df_in["Date"] + " " + df_in["Time"])
df_in["time"] = pd.to_datetime(df_in["time"], format="%Y.%m.%d %H:%M:%S")
df_in = df_in.drop(["Pluviometer", "Date", "Time"], axis=1)
logger.debug(df_in.head())
logger.debug(df_in.tail())
df_in = df_in.set_index("time").resample("H").mean().reset_index()
mask = (df_in["time"] >= SITE["start_date"]) & (
df_in["time"] <= SITE["end_date"]
)
df_in = df_in.loc[mask]
df_in = df_in.reset_index()
days = | pd.date_range(start=SITE["start_date"], end=SITE["end_date"], freq="H") | pandas.date_range |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
from app import app
import pandas as pd
import pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
""", className='mb-3'
),
dcc.Input(id='tokens',
placeholder='Enter a value...',
type='text',
value=''
),
# for _ in ALLOWED_TYPES
],
md=7,
)
column2 = dbc.Col(
[
html.H2('Strain Recommender', className='mb-5'),
html.Div(id='prediction-content', className='lead'),
html.Img(src='assets/meditation1.jpeg', className='img-fluid')
]
)
layout = dbc.Row([column1, column2])
@app.callback(
Output('prediction-content', 'children'),
[Input('tokens','value')],
)
# def predict(tokens):
# # df = pd.DataFrame(
# # columns=['tokens'],
# # data=[[tokens.astype(str)]]
# # )
# y_pred = pipeline.predict([tokens])[0]
# return f'{y_pred:10,.2f} Strain'
def predict(tokens):
df = pd.read_csv('notebooks/cannabis.csv')
df = df.dropna(subset = ['Description'])
tfidf = pickle.load(open("notebooks/vect_01.pkl", "rb"))
nn = pickle.load(open("notebooks/knn_01.pkl", "rb"))
# Transform
request = | pd.Series(tokens) | pandas.Series |
"""
This "new" version of the code uses a different dataframe for descriptors:
'pan_drugs_dragon7_descriptors.tsv' instead of 'combined_pubchem_dragon7_descriptors.tsv'
"""
import warnings
warnings.filterwarnings('ignore')
import os
import sys
from pathlib import Path
import argparse
from time import time
from pprint import pformat
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer, KNNImputer
# github.com/mtg/sms-tools/issues/36
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# Utils
from utils.classlogger import Logger
from utils.utils import dump_dict, get_print_func, dropna
from utils.impute import impute_values
# from utils.resample import flatten_dist
from ml.scale import scale_fea
from ml.data import extract_subset_fea
# File path
filepath = Path(__file__).resolve().parent
# Settings
na_values = ['na', '-', '']
fea_prfx_dct = {'ge': 'ge_', 'cnv': 'cnv_', 'snp': 'snp_',
'dd': 'dd_', 'fng': 'fng_'}
def create_basename(args):
""" Name to characterize the data. Can be used for dir name and file name. """
ls = args.drug_fea + args.cell_fea
if args.src is None:
name = '.'.join(ls)
else:
src_names = '_'.join(args.src)
name = '.'.join([src_names] + ls)
name = 'data.' + name
return name
def create_outdir(outdir, args):
""" Creates output dir. """
basename = create_basename(args)
outdir = Path(outdir, basename)
os.makedirs(outdir, exist_ok=True)
return outdir
def groupby_src_and_print(df, print_fn=print):
print_fn(df.groupby('SOURCE').agg({'CELL': 'nunique', 'DRUG': 'nunique'}).reset_index())
def add_fea_prfx(df, prfx: str, id0: int):
""" Add prefix feature columns. """
return df.rename(columns={s: prfx+str(s) for s in df.columns[id0:]})
def load_rsp(fpath, src=None, r2fit_th=None, print_fn=print):
""" Load drug response data. """
rsp = pd.read_csv(fpath, sep='\t', na_values=na_values)
rsp.drop(columns='STUDY', inplace=True) # gives error when saves in 'parquet' format
# print(rsp.dtypes)
print_fn('\nAll samples (original).')
print_fn(f'rsp.shape {rsp.shape}')
groupby_src_and_print(rsp, print_fn=print_fn)
print_fn(rsp.SOURCE.value_counts())
# Drop bad samples
if r2fit_th is not None:
# Yitan
# TODO: check this (may require a more rigorous filtering)
# print_fn('\n\nDrop bad samples ...')
# id_drop = (rsp['AUC'] == 0) & (rsp['EC50se'] == 0) & (rsp['R2fit'] == 0)
# rsp = rsp.loc[~id_drop,:]
# print_fn(f'Dropped {sum(id_drop)} rsp data points.')
# print_fn(f'rsp.shape {rsp.shape}')
print_fn('\nDrop samples with low R2fit.')
print_fn('Samples with bad fit.')
id_drop = rsp['R2fit'] <= r2fit_th
rsp_bad_fit = rsp.loc[id_drop, :].reset_index(drop=True)
groupby_src_and_print(rsp_bad_fit, print_fn=print_fn)
print_fn(rsp_bad_fit.SOURCE.value_counts())
print_fn('\nSamples with good fit.')
rsp = rsp.loc[~id_drop, :].reset_index(drop=True)
groupby_src_and_print(rsp, print_fn=print_fn)
print_fn(rsp.SOURCE.value_counts())
print_fn(f'Dropped {sum(id_drop)} rsp data points.')
rsp['SOURCE'] = rsp['SOURCE'].apply(lambda x: x.lower())
if src is not None:
print_fn('\nExtract specific sources.')
rsp = rsp[rsp['SOURCE'].isin(src)].reset_index(drop=True)
rsp['AUC_bin'] = rsp['AUC'].map(lambda x: 0 if x > 0.5 else 1)
rsp.replace([np.inf, -np.inf], value=np.nan, inplace=True)
print_fn(f'rsp.shape {rsp.shape}')
groupby_src_and_print(rsp, print_fn=print_fn)
return rsp
def load_ge(fpath, print_fn=print, float_type=np.float32):
""" Load RNA-Seq data. """
print_fn(f'\nLoad RNA-Seq ... {fpath}')
ge = pd.read_csv(fpath, sep='\t', na_values=na_values)
ge.rename(columns={'Sample': 'CELL'}, inplace=True)
fea_id0 = 1
ge = add_fea_prfx(ge, prfx=fea_prfx_dct['ge'], id0=fea_id0)
if sum(ge.isna().sum() > 0):
# ge = impute_values(ge, print_fn=print_fn)
print_fn('Columns with NaNs: {}'.format( sum(ge.iloc[:, fea_id0:].isna().sum() > 0) ))
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# imputer = KNNImputer(missing_values=np.nan, n_neighbors=5,
# weights='uniform', metric='nan_euclidean',
# add_indicator=False)
ge.iloc[:, fea_id0:] = imputer.fit_transform(ge.iloc[:, fea_id0:].values)
print_fn('Columns with NaNs: {}'.format( sum(ge.iloc[:, fea_id0:].isna().sum() > 0) ))
# Cast features (casting to float16 changes the shape. why?)
ge = ge.astype(dtype={c: float_type for c in ge.columns[fea_id0:]})
print_fn(f'ge.shape {ge.shape}')
return ge
def load_dd(fpath, print_fn=print, dropna_th=0.1, float_type=np.float32, src=None):
""" Load drug descriptors. """
print_fn(f'\nLoad descriptors ... {fpath}')
dd = pd.read_csv(fpath, sep='\t', na_values=na_values)
dd.rename(columns={'ID': 'DRUG'}, inplace=True)
if "combined_mordred_descriptors" in fpath:
# Descriptors that were prepared by Maulik and are available in MoDaC
fea_id0 = 1
dd = add_fea_prfx(dd, prfx=fea_prfx_dct['dd'], id0=fea_id0)
elif 'nci60' in src:
dd = dropna(dd, axis=0, th=dropna_th)
fea_id0 = 2
else:
fea_id0 = 4
if sum(dd.isna().sum() > 0):
print_fn('Columns with all NaN values: {}'.format(
sum(dd.isna().sum(axis=0).sort_values(ascending=False) == dd.shape[0])))
print_fn('Columns with NaNs: {}'.format( sum(dd.iloc[:, fea_id0:].isna().sum() > 0) ))
imputer = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)
# imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# imputer = KNNImputer(missing_values=np.nan, n_neighbors=5,
# weights='uniform', metric='nan_euclidean',
# add_indicator=False)
dd.iloc[:, fea_id0:] = imputer.fit_transform(dd.iloc[:, fea_id0:].values)
print_fn('Columns with NaNs: {}'.format( sum(dd.iloc[:, fea_id0:].isna().sum() > 0) ))
# Cast features
dd = dd.astype(dtype={c: float_type for c in dd.columns[fea_id0:]})
print_fn(f'dd.shape {dd.shape}')
return dd
def plot_dd_na_dist(dd, savepath=None):
""" Plot distbirution of na values in drug descriptors. """
fig, ax = plt.subplots()
sns.distplot(dd.isna().sum(axis=0)/dd.shape[0], bins=100, kde=False, hist_kws={'alpha': 0.7})
plt.xlabel('Ratio of total NA values in a descriptor to the total drug count')
plt.ylabel('Total # of descriptors with the specified NA ratio')
plt.title('Histogram of descriptors based on ratio of NA values')
plt.grid(True)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight') # dpi=200
else:
plt.savefig('dd_hist_ratio_of_na.png', bbox_inches='tight') # dpi=200
def plot_rsp_dists(rsp, rsp_cols, savepath=None):
""" Plot distributions of all response variables.
Args:
rsp : df of response values
rsp_cols : list of col names
savepath : full path to save the image
"""
ncols = 4
nrows = int(np.ceil(len(rsp_cols)/ncols))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(10, 10))
for i, ax in enumerate(axes.ravel()):
if i >= len(rsp_cols):
fig.delaxes(ax) # delete un-used ax
else:
target_name = rsp_cols[i]
x = rsp[target_name].copy()
x = x[~x.isna()].values
sns.distplot(x, bins=100, kde=True, ax=ax, label=target_name, # fit=norm,
kde_kws={'color': 'k', 'lw': 0.4, 'alpha': 0.8},
hist_kws={'color': 'b', 'lw': 0.4, 'alpha': 0.5})
ax.tick_params(axis='both', which='major', labelsize=7)
txt = ax.yaxis.get_offset_text(); txt.set_size(7) # adjust exponent fontsize in xticks
txt = ax.xaxis.get_offset_text(); txt.set_size(7)
ax.legend(fontsize=5, loc='best')
ax.grid(True)
# plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=1.0)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight') # dpi=200
else:
plt.savefig('rsp_dists.png', bbox_inches='tight')
def parse_args(args):
parser = argparse.ArgumentParser(description='Create ML dataframe.')
parser.add_argument('--rsp_path',
type=str,
help='Path to drug response file.')
parser.add_argument('--drug_path',
type=str,
help='Path to drug features file.')
parser.add_argument('--cell_path',
type=str,
help='Path to cell features file.')
parser.add_argument('--r2fit_th',
type=float,
default=0.5,
help='Drop drug response values with R-square fit \
less than this value (Default: 0.5).')
parser.add_argument('--drug_fea',
type=str,
nargs='+',
choices=['dd'],
default=['dd'],
help='Default: [dd].')
parser.add_argument('--cell_fea',
type=str,
nargs='+',
choices=['ge'],
default=['ge'],
help='Default: [ge].')
parser.add_argument('--gout',
type=str,
help='Default: ...')
parser.add_argument('--dropna_th',
type=float,
default=0,
help='Default: 0')
parser.add_argument('--src',
nargs='+',
default=None,
choices=['ccle', 'gcsi', 'gdsc', 'gdsc1', 'gdsc2', 'ctrp', 'nci60'],
help='Data sources to extract (default: None).')
# parser.add_argument('--n_samples',
# type=int,
# default=None,
# help='Number of docking scores to get into the ML df (default: None).')
parser.add_argument('--flatten',
action='store_true',
help='Flatten the distribution of response values (default: False).')
parser.add_argument('-t', '--trg_name',
default='AUC',
type=str,
choices=['AUC'],
help='Name of target variable (default: AUC).')
args = parser.parse_args(args)
return args
def run(args):
# import pdb; pdb.set_trace()
t0 = time()
rsp_cols = ['AUC', 'AUC1', 'EC50', 'EC50se', 'R2fit',
'Einf', 'IC50', 'HS', 'AAC1', 'DSS1']
outdir = create_outdir(args.gout, args)
# -----------------------------------------------
# Logger
# -----------------------------------------------
lg = Logger(outdir/'gen.df.log')
print_fn = get_print_func(lg.logger)
print_fn(f'File path: {filepath}')
print_fn(f'\n{pformat(vars(args))}')
dump_dict(vars(args), outpath=outdir/'gen.df.args')
# -----------------------------------------------
# Load response data and features
# -----------------------------------------------
rsp = load_rsp(args.rsp_path, src=args.src, r2fit_th=args.r2fit_th,
print_fn=print_fn)
ge = load_ge(args.cell_path, print_fn=print_fn, float_type=np.float32)
dd = load_dd(args.drug_path, dropna_th=args.dropna_th, print_fn=print_fn,
float_type=np.float32, src=args.src)
# -----------------------------------------------
# Merge data
# -----------------------------------------------
print_fn('\n{}'.format('-' * 40))
print_fn('Start merging response with other dfs.')
print_fn('-' * 40)
data = rsp
# Merge with ge
print_fn('\nMerge with expression (ge).')
data = | pd.merge(data, ge, on='CELL', how='inner') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 12:06:21 2019
@author: <NAME>
@contact: <EMAIL>
"""
import os
import pandas as pd
from sector_weights import set_sector_weights as set_sector_weights
from cal_weighted_average import wavg
class MKT_Manager:
""" Market manager: collect market data
generate market index and key market
price signals
"""
def __init__(self):
self.cur_path = os.path.dirname(os.path.abspath(__file__))
self.parent_path = os.path.dirname(self.cur_path)
def gen_market_index_v2( self, start, end,
data_index, data_panel ):
# Generate market index table directly
# from commodity market index database
# Return current time market panel information
panel = data_panel[["Dates","Code","Open","High","Low","Close","OPI","Vol"]]
data_index = data_index[(data_index.index>=start) & (data_index.index<=end)]
# Return market index price overtime
sector_weight = set_sector_weights()
sector_overview = pd.DataFrame()
for block in sector_weight:
ans = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# build first input with noational aggregation
# import build_input_national_aggr
print('####################')
print('BUILDING INPUT DATA FOR DISAGGREGATION OF SWITZERLAND INTO ARCHETYPES')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_national_aggr'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
#def replace_table(df, tb):
#
## list_col = list(aql.get_sql_cols(tb, sc, db).keys())
#
# aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# exec_strg = '''
# AlTER
# DELETE FROM {sc}.{tb}
# WHERE {del_str}
# '''.format(tb=tb, sc=sc, del_str=del_str)
# aql.exec_sql(exec_strg, db=db)
#
# aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
#
#aql.exec_sql('''
# ALTER TABLE lp_input_archetypes.profdmnd
# DROP CONSTRAINT profdmnd_pkey,
# DROP CONSTRAINT profdmnd_dmnd_pf_id_fkey;
# ''', db=db)
#%%
dfprop_era_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/prop_era_arch.csv', sep = ';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv.csv'),sep=';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv_prop_0.csv'),sep=';')
dfpv_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/surf_prod_arch_pv_prop_new.csv',sep=';')
# set nd_id to that potential
#dfpv_arch['pv_power_pot'] = dfpv_arch['el_prod']/(1000*dfkev['flh'].mean())
dfpv_arch = dfpv_arch.groupby(dfpv_arch.nd_id_new).sum()
#dfpv_arch['nd_id_new'] = dfpv_arch.nd_id
#dfpv_arch.loc[:,dfpv_arch.nd_id_new.str.contains('OTH')] == 'OTH_TOT'
#dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_pot']/dfpv_arch['pv_power_pot'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_tot_est']/dfpv_arch['pv_power_tot_est'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_st_pwr'] = 0
#
#dfpv_arch_CH0 = dfpv_arch.loc['CH0']
#dfpv_arch = dfpv_arch.drop(['CH0'], axis = 0)
dfpv_arch = dfpv_arch.reset_index()
# %%
dfload_arch = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id not in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch['DateTime'] = dfload_arch['DateTime'].astype('datetime64[ns]')
dfload_arch_res = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("SFH") or nd_id.str.contains("MFH")',engine='python').reset_index(drop=True)
dfload_arch_res['DateTime'] = dfload_arch_res['DateTime'].astype('datetime64[ns]')
dfload_arch_notres = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("OCO") or nd_id.str.contains("IND")',engine='python').reset_index(drop=True)
dfload_arch_notres['DateTime'] = dfload_arch_notres['DateTime'].astype('datetime64[ns]')
dfload_arch_CH0 = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch_CH0['DateTime'] = dfload_arch_CH0['DateTime'].astype('datetime64[ns]')
# dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'],'!=')])
# dfload_arch_res= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['SFH%','MFH%'],'LIKE')])
# dfload_arch_notres= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['OCO%','IND%'],'LIKE')])
# dfload_arch_CH0_1 = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'])])
#dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes')
dfload_dict ={}
dfload_dict_new = {}
df = dfload_arch_res.copy()
df['nd_id_new'] = 0
df['erg_tot_new'] = 0
for i in df.nd_id.unique():
dfload_dict[i] = df.loc[df.nd_id == i]
for l in (0,1,2,3):
df_1 = dfload_dict[i].copy()
df_1['erg_tot_new'] = df_1.loc[:,'erg_tot'] * dfprop_era_arch.loc[dfprop_era_arch.nd_el.str.contains(i+'_'+str(l)),'prop'].reset_index(drop=True).loc[0]
df_1['nd_id_new'] = i+'_'+str(l)
dfload_dict_new[i+'_'+str(l)] = df_1
dfload_arch_res_new = dfload_arch_notres.head(0)
for j in dfload_dict_new:
dfload_arch_res_new = dfload_arch_res_new.append(dfload_dict_new[j],ignore_index=True)
dfload_arch_notres['nd_id_new'] = dfload_arch_notres[['nd_id']]
dfload_arch_notres['erg_tot_new'] = dfload_arch_notres[['erg_tot']]
dfload_arch = dfload_arch_res_new.append(dfload_arch_notres,ignore_index=True)
dfload_arch = dfload_arch.set_index('DateTime')
dfload_arch.index = pd.to_datetime(dfload_arch.index)
dfload_arch_CH0 = dfload_arch_CH0.set_index('DateTime')
dfload_arch = dfload_arch.drop(columns=['nd_id','erg_tot']).rename(columns={'nd_id_new':'nd_id','erg_tot_new':'erg_tot'})
# %%
np.random.seed(3)
dferg_arch = dfload_arch.groupby('nd_id')['erg_tot'].sum()
dferg_arch = dferg_arch.reset_index()
dferg_arch['nd_id_new'] = dferg_arch.nd_id
dict_nd = dferg_arch.set_index('nd_id')['nd_id_new'].to_dict()
# %%
df_solar_canton_raw = pd.read_csv(base_dir+'/archetype_disaggr/PV/swiss_location_solar.csv')[['value', 'hy', 'canton','DateTime']]
df_solar_canton_raw['DateTime'] = df_solar_canton_raw['DateTime'].astype('datetime64[ns]')
# df_solar_canton_raw_test = aql.read_sql(db, 'profiles_raw', 'swiss_location_solar',
# keep=['value', 'hy', 'canton','DateTime'])
df_solar_canton_raw_1 = df_solar_canton_raw.pivot_table(index='DateTime',columns='canton', values='value')
df_solar_canton_1h = df_solar_canton_raw_1.resample('1h').sum()/4
df_solar_canton_1h['avg_all'] = df_solar_canton_1h.mean(axis=1)
df_solar_canton_1h['DateTime'] = df_solar_canton_1h.index
df_solar_canton_1h = df_solar_canton_1h.reset_index(drop=True)
df_solar_canton_1h['hy'] = df_solar_canton_1h.index
df_solar_canton_raw_1h = pd.melt(df_solar_canton_1h, id_vars=['DateTime','hy'], var_name='canton', value_name='value')
df_solar_canton_1h.index = df_solar_canton_1h['DateTime']
df_solar_canton_1h = df_solar_canton_1h.drop(columns=['DateTime','hy'])
cols = df_solar_canton_1h.columns.tolist()
cols = cols[-1:] + cols[:-1]
df_solar_canton_1h = df_solar_canton_1h[cols]
#list_ct = df_solar_canton_raw.canton.unique().tolist()
list_ct = df_solar_canton_1h.columns.tolist()
# %% ~~~~~~~~~~~~~~~~~~ DEF_NODE
#
#df_def_node_0 = aql.read_sql(db, sc, 'def_node', filt=[('nd', ['SFH%'], ' NOT LIKE ')])
#df_nd_add = pd.DataFrame(pd.concat([dferg_filt.nd_id_new.rename('nd'),
# ], axis=0)).reset_index(drop=True)
color_nd = {'IND_RUR': '#472503',
'IND_SUB': '#041FA3',
'IND_URB': '#484A4B',
'MFH_RUR_0': '#924C04',
'MFH_SUB_0': '#0A81EE',
'MFH_URB_0': '#BDC3C5',
'MFH_RUR_1': '#924C04',
'MFH_SUB_1': '#0A81EE',
'MFH_URB_1': '#BDC3C5',
'MFH_RUR_2': '#924C04',
'MFH_SUB_2': '#0A81EE',
'MFH_URB_2': '#BDC3C5',
'MFH_RUR_3': '#924C04',
'MFH_SUB_3': '#0A81EE',
'MFH_URB_3': '#BDC3C5',
'OCO_RUR': '#6D3904',
'OCO_SUB': '#0A31EE',
'OCO_URB': '#818789',
'SFH_RUR_0': '#BD6104',
'SFH_SUB_0': '#0EBADF',
'SFH_URB_0': '#A9A4D8',
'SFH_RUR_1': '#BD6104',
'SFH_SUB_1': '#0EBADF',
'SFH_URB_1': '#A9A4D8',
'SFH_RUR_2': '#BD6104',
'SFH_SUB_2': '#0EBADF',
'SFH_URB_2': '#A9A4D8',
'SFH_RUR_3': '#BD6104',
'SFH_SUB_3': '#0EBADF',
'SFH_URB_3': '#A9A4D8',
}
col_nd_df = pd.DataFrame.from_dict(color_nd, orient='index').reset_index().rename(columns={'index': 'nd',0:'color'})
df_def_node_0 = pd.read_csv(data_path_prv + '/def_node.csv')
# df_def_node_0 = aql.read_sql(db, sc, 'def_node')
df_nd_add = pd.DataFrame(pd.concat([dferg_arch.nd_id_new.rename('nd'),
], axis=0)).reset_index(drop=True)
# reduce numbar
#df_nd_add = df_nd_add
nd_id_max = df_def_node_0.loc[~df_def_node_0.nd.isin(df_nd_add.nd)].nd_id.max()
df_nd_add['nd_id'] = np.arange(0, len(df_nd_add)) + nd_id_max + 1
#df_nd_add['color'] = 'g'
df_nd_add = pd.merge(df_nd_add,col_nd_df, on = 'nd')
df_def_node = df_nd_add.reindex(columns=df_def_node_0.columns.tolist()).fillna(0)
dict_nd_id = df_nd_add.set_index('nd')['nd_id'].to_dict()
dict_nd_id = {nd_old: dict_nd_id[nd] for nd_old, nd in dict_nd.items()
if nd in dict_nd_id}
# %% set nd_id number to the corresponding nd_id new
dfpv_arch = dfpv_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dfpv_arch.loc[key,'nd_id'] = value
dferg_arch = dferg_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dferg_arch.loc[key,'nd_id'] = value
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PP_TYPE
df_def_pp_type_0 = pd.read_csv(data_path_prv + '/def_pp_type.csv')
# df_def_pp_type_0 = aql.read_sql(db, sc, 'def_pp_type')
df_def_pp_type = df_def_pp_type_0.copy().head(0)
for npt, pt, cat, color in ((0, 'STO_LI_SFH', 'NEW_STORAGE_LI_SFH', '#7B09CC'),
(1, 'STO_LI_MFH', 'NEW_STORAGE_LI_MFH', '#59F909'),
(2, 'STO_LI_OCO', 'NEW_STORAGE_LI_OCO', '#28A503'),
(3, 'STO_LI_IND', 'NEW_STORAGE_LI_IND', '#1A6703'),
(4, 'PHO_SFH', 'PHOTO_SFH', '#D9F209'),
(5, 'PHO_MFH', 'PHOTO_MFH', '#F2D109'),
(6, 'PHO_OCO', 'PHOTO_OCO', '#F27E09'),
(7, 'PHO_IND', 'PHOTO_IND', '#F22C09'),):
df_def_pp_type.loc[npt] = (npt, pt, cat, color)
df_def_pp_type['pt_id'] = np.arange(0, len(df_def_pp_type)) + df_def_pp_type_0.pt_id.max() + 1
# %% ~~~~~~~~~~~~~~~~~~~~~~ DEF_FUEL
# all there
df_def_fuel = pd.read_csv(data_path_prv + '/def_fuel.csv')
# df_def_fuel_test = aql.read_sql(db, sc, 'def_fuel')
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PLANT
df_def_plant_0 = pd.read_csv(data_path_prv + '/def_plant.csv')
# df_def_plant_test = aql.read_sql(db, sc, 'def_plant')
dict_pp_id_all = df_def_plant_0.set_index('pp')['pp_id'].to_dict()
df_pp_add_0 = pd.DataFrame(df_nd_add.nd).rename(columns={'nd': 'nd_id'})
df_pp_add_1 = df_pp_add_0.nd_id.str.slice(stop=3)
df_pp_add = pd.DataFrame()
for sfx, fl_id, pt_id, set_1 in [('_PHO', 'photovoltaics', 'PHO_', ['set_def_pr','set_def_add']),
('_STO_LI', 'new_storage', 'STO_LI_', ['set_def_st','set_def_add']),
]:
new_pp_id = df_def_plant_0.pp_id.max() + 1
data = dict(pp=df_pp_add_0 + sfx,
fl_id=fl_id, pt_id=pt_id + df_pp_add_1 , pp_id=np.arange(new_pp_id, new_pp_id + len(df_pp_add_0)),
**{st: 1 if st in set_1 else 0 for st in [c for c in df_def_plant_0.columns if 'set' in c]})
df_pp_add = df_pp_add.append(df_pp_add_0.assign(**data), sort=True)
df_pp_add.pp_id = np.arange(0, len(df_pp_add)) + df_pp_add.pp_id.min()
df_def_plant = df_pp_add[df_def_plant_0.columns].reset_index(drop=True)
for df, idx in [(df_def_fuel, 'fl'), (df_def_pp_type, 'pt'), (df_def_node, 'nd')]:
df_def_plant, _ = translate_id(df_def_plant, df, idx)
# selecting random profiles from canton list
#np.random.seed(4)
dict_pp_id = df_pp_add.set_index('pp')['pp_id'].to_dict()
df_pp_add_pho = df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics']
dict_pp_id_pho = df_pp_add_pho.set_index('pp')['pp_id'].to_dict()
# solar profile dictionary by node
dict_ct = {pp: list_ct[npp%len(list_ct)]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
dict_ct = {pp: list_ct[0]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEF_PROFILE
df_def_profile_0 = pd.read_csv(data_path_prv + '/def_profile.csv')
# df_def_profile_test = aql.read_sql(db, sc, 'def_profile')
df_def_profile_sup = pd.DataFrame({'primary_nd': df_solar_canton_1h.columns}) + '_PHO'
df_def_profile_sup['pf'] = 'supply_' + df_def_profile_sup.primary_nd
df_def_profile_sup['pf_id'] = df_def_profile_sup.index.rename('pf_id') + df_def_profile_0.pf_id.max() + 1
df_def_profile_sup = df_def_profile_sup[df_def_profile_0.columns]
df_def_profile_sup.drop(df_def_profile_sup.tail(23).index,inplace=True) # to keep only average for now
# Demand profiles
df_def_profile_dmnd = df_def_node.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd['pf'] = 'demand_EL_' + df_def_profile_dmnd.primary_nd
df_def_profile_dmnd['pf_id'] = df_def_profile_dmnd.index.rename('pf_id') + df_def_profile_sup.pf_id.max() + 1
df_def_profile_dmnd = df_def_profile_dmnd[df_def_profile_0.columns]
df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
# df_def_profile_prc], axis=0)
df_def_profile = df_def_profile.reset_index(drop=True)
#df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
df_def_profile
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NODE_ENCAR
df_node_encar_0 = pd.read_csv(data_path_prv + '/node_encar.csv')
# df_node_encar_0 = aql.read_sql(db, sc, 'node_encar')
df_node_encar_0_CH0 = df_node_encar_0.copy().loc[(df_node_encar_0.nd_id == 1)]
factor_CH0_dmnd = dfload_arch_CH0.erg_tot.sum()/df_node_encar_0.loc[(df_node_encar_0.nd_id == 1)].dmnd_sum
factor_CH0_dmnd = factor_CH0_dmnd.reset_index(drop=True)
df = df_node_encar_0_CH0.filter(like='dmnd_sum')*factor_CH0_dmnd.loc[0]
df_node_encar_0_CH0.update(df)
#exec_str = '''UPDATE sc.node_encar SET
# SET sc.dmnd_sum = df_node_encar_0_CH0.dmnd_sum
# WHERE nd_id = 1
#
# '''
#aql.exec_sql(exec_str=exec_str,db=db)
#df_ndca_add = (dferg_filt.loc[dferg_filt.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot_filled']]
# .rename(columns={'erg_tot_filled': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
df_ndca_add = (dferg_arch.loc[dferg_arch.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot']]
.rename(columns={'erg_tot': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
#TODO maybe add here some grid losses
data = dict(vc_dmnd_flex=0.1, ca_id=0, grid_losses=0.0413336227316051, grid_losses_absolute=0)
df_node_encar = df_ndca_add.assign(**data).reindex(columns=df_node_encar_0.columns)
list_dmnd = [c for c in df_node_encar if 'dmnd_sum' in c]
df_node_encar = df_node_encar.assign(**{c: df_node_encar.dmnd_sum
for c in list_dmnd})
df_node_encar = pd.merge(df_node_encar, df_def_profile_dmnd, left_on='nd_id', right_on='primary_nd', how='inner')
df_node_encar['dmnd_pf_id'] = df_node_encar.pf
df_node_encar = df_node_encar.loc[:, df_node_encar_0.columns]
for df, idx in [(df_def_node, 'nd'), (df_def_profile, ['pf', 'dmnd_pf'])]:
df_node_encar, _ = translate_id(df_node_encar, df, idx)
fct_dmnd = pd.read_csv(base_dir+'/archetype_disaggr/demand/factor_dmnd_future_years.csv',sep=';')
df = df_node_encar.filter(like='dmnd_sum')*fct_dmnd
df_node_encar.update(df)
df_0 = df_node_encar_0[df_node_encar_0.nd_id !=1]
# TODO REPLACE INSTEAD OF UPDATE
df_node_encar_new = pd.concat([df_0,df_node_encar_0_CH0,df_node_encar])
# set the absolute losses
df_node_encar_new.loc[df_node_encar_new.nd_id ==1,['grid_losses_absolute']] = 142320
df_node_encar_new = df_node_encar_new.reset_index(drop=True)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFDMND
df_profdmnd_0 = pd.read_csv(data_path_prv + '/profdmnd.csv').query('dmnd_pf_id not in %s'%([0]))
# df_profdmnd_test = aql.read_sql(db, sc, 'profdmnd', filt=[('dmnd_pf_id', [0], '!=')])
#df_profdmnd_0 = aql.read_sql(db, sc, 'profdmnd', filt=[('hy', [0])], limit=1)
df_dmnd_add = dfload_arch
df_dmnd_add_CH0 = dfload_arch_CH0
#
#df_dmnd_add = dfload_arch.loc[dfload_arch.nd_id.isin([{val: key for key, val in dict_nd_id.items()}[nd] for nd in df_nd_add.nd_id])]
#
#df_dmnd_add = dfcr_filt.loc[dfcr_filt.nd_id.isin([{val: key for key, val in dict_nd_id.items()}[nd] for nd in df_nd_add.nd_id])]
df_dmnd_add['nd_id'] = df_dmnd_add.nd_id.replace(dict_nd)
df_dmnd_add['ca_id'] = 0
df_dmnd_add = pd.merge(df_dmnd_add, df_def_profile[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_add = df_dmnd_add.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_add_CH0['ca_id'] = 0
df_dmnd_add_CH0['pf_id'] = 0
df_dmnd_add_CH0['primary_nd'] = 'CH0'
df_dmnd_add_CH0 = df_dmnd_add_CH0.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
#df_dmnd_add['value'] = df_dmnd_add.value / 1e3
df_profdmnd = df_dmnd_add[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
df_profdmnd_CH0 = df_dmnd_add_CH0[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
# TODO REPLACE INSTEAD OF UPDATE
df_profdmnd_new = pd.concat([df_profdmnd_CH0,df_profdmnd_0,df_profdmnd])
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFPRICE
# --> NO CHANGES! HOUSEHOLDS USE CH0 PRICE PROFILES
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFSUPPLY
#
df_profsupply = pd.read_csv(data_path_prv + '/profsupply.csv').head()
# df_profsupply = aql.read_sql(db, sc, 'profsupply', filt=[('hy', [0])], limit=1)
df_sup_add = df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics', ['pp_id', 'nd_id']]
df_sup_add['canton'] = df_sup_add.nd_id.replace(dict_ct)
df_sup_add = df_sup_add[['canton']].drop_duplicates()
df_sup_add_new = pd.merge(df_sup_add, df_solar_canton_raw_1h, on='canton', how='inner')
dict_pf_id = df_def_profile_sup.set_index('pf')['pf_id'].to_dict()
#dict_pf_id = {ct: dict_pf_id['supply_' + ct + '_PHO'] for ct in list_ct}
dict_pf_id = {'avg_all': dict_pf_id['supply_' + 'avg_all' + '_PHO']}
df_sup_add_new['supply_pf_id'] = df_sup_add_new.canton.replace(dict_pf_id)
df_profsupply = df_sup_add_new[df_profsupply.columns.tolist()]
# %% ~~~~~~~~~~~~~~~~~~~~~~~ PLANT_ENCAR (needs profsupply data)
df_plant_encar = pd.read_csv(data_path_prv + '/plant_encar.csv')
# df_plant_encar = aql.read_sql(db, sc, 'plant_encar')
# Setting CH0 PV capacity to zero
df_plant_encar.loc[df_plant_encar.pp_id==dict_pp_id_all['CH_SOL_PHO'],
df_plant_encar.columns.str.contains('cap_pwr_leg')] = 0
# TODO choose for storage cap_pwr_leg
#dfpvbatt_costs = pd.read_csv(os.path.join(base_dir,'Costs/cost_tech_node.csv'),sep=';')
#dfpvbatt_costs = pd.read_csv(os.path.join(base_dir,'Costs/cost_tech_node_li12yr.csv'),sep=';')
# dfpvbatt_costs = pd.read_csv(os.path.join(base_dir,'Costs/cost_tech_node_li15yr.csv'),sep=';')
dfpvbatt_costs = pd.read_csv(os.path.join(base_dir+ '/archetype_disaggr/costs/cost_tech_node_li15yr.csv'),sep=';')
df_ppca_add = (dfpv_arch.set_index('nd_id_new')
.loc[df_nd_add.nd, ['cap_pv', 'cap_st_pwr']]
.rename(columns={'cap_pv': 'PHO',
'cap_st_pwr': 'STO_LI'})
.stack().reset_index()
.rename(columns={'level_1': 'pt_id',
'nd_id_new': 'nd_id',
0: 'cap_pwr_leg'}))
#df_ppca_add_1 = pd.merge(df_ppca_add, dfpv_arch.reset_index(drop=True)[['pv_power_pot','nd_id_new']], left_on='nd_id', right_on='nd_id_new', how='inner')
df_ppca_add_1 = pd.merge(df_ppca_add, dfpv_arch.reset_index(drop=True)[['pv_power_tot_est','nd_id_new']], left_on='nd_id', right_on='nd_id_new', how='inner')
df_ppca_add_1 = df_ppca_add_1.drop(columns='nd_id_new')
#df_ppca_add_1.loc[df_ppca_add_1.pt_id.str.contains('STO_LI'),'pv_power_pot'] = 0
df_ppca_add_1.loc[df_ppca_add_1.pt_id.str.contains('STO_LI'),'pv_power_tot_est'] = 0
#
#df_ppca_add = (dferg_filt.set_index('nd_id_new')
# .loc[df_nd_add.nd, ['cap_pv', 'cap_st_pwr']]
# .rename(columns={'cap_pv': 'PHO_SFH',
# 'cap_st_pwr': 'STO_SFH'})
# .stack().reset_index()
# .rename(columns={'level_1': 'pt_id',
# 'nd_id_new': 'nd_id',
# 0: 'cap_pwr_leg'}))
df_ppca_add['supply_pf_id'] = 'supply_' + df_ppca_add.nd_id.replace(dict_ct) + '_PHO'
df_ppca_add['pt_id'] = df_ppca_add['pt_id'] + '_' + df_ppca_add.nd_id.replace(dict_nd).str.slice(stop=3)
df_ppca_add.loc[~df_ppca_add.pt_id.str.contains('PHO'), 'supply_pf_id'] = None
df_ppca_add_2 = df_ppca_add.copy()
df_ppca_add = df_ppca_add.set_index(df_ppca_add.pt_id)
#dfpvbatt_costs = dfpvbatt_costs.set_index(dfpvbatt_costs.pt_id).drop(columns=['nd_id','pt_id'])
df_ppca_add = pd.merge(dfpvbatt_costs,df_ppca_add_2, on=['pt_id','nd_id'])
#df_ppca_add = pd.concat([dfpvbatt_costs,df_ppca_add],axis=1)
df_ppca_add = df_ppca_add.reset_index(drop=True)
## sale and purches capacity is 110% of maximum
list_nd_0 = [nd_0 for nd_0, nd in dict_nd.items()
if nd in df_def_node.nd.tolist()]
cap_prc = dfload_arch.loc[dfload_arch.nd_id.isin(list_nd_0)].pivot_table(index='nd_id', values='erg_tot', aggfunc='max') * 1
cap_prc = cap_prc.rename(columns={'erg_tot': 'cap_pwr_leg'}).reset_index().assign(supply_pf_id=None)
cap_prc = cap_prc.set_index(['nd_id'])
df_pp_add_1 = df_pp_add.set_index(['nd_id'])
cap_prc['pt_id'] = df_pp_add_1.loc[df_pp_add_1.pt_id.str.contains('PRC')].pt_id
cap_prc = cap_prc.reset_index()
df_ppca_add = df_ppca_add.assign(ca_id=0, pp_eff=1,
factor_lin_0=0, factor_lin_1=0,
cap_avlb=1, vc_ramp=0,
vc_om=0, erg_chp=None)
# translate to ids before joining pp_id column
for df, idx in [(df_def_pp_type, 'pt'), (df_def_node, 'nd'), (df_def_profile, ['pf', 'supply_pf'])]:
df_ppca_add, _ = translate_id(df_ppca_add, df, idx)
df_ppca_add['supply_pf_id'] = pd.to_numeric(df_ppca_add.supply_pf_id)
join_idx = ['pt_id', 'nd_id']
df_ppca_add = (df_ppca_add.join(df_def_plant.set_index(join_idx)[['pp', 'pp_id']],
on=join_idx))
list_cap = [c for c in df_plant_encar.columns if 'cap_pwr_leg' in c]
df_ppca_add = df_ppca_add.assign(**{cap: df_ppca_add.cap_pwr_leg for cap in list_cap})
df_plant_encar_1 = pd.concat([df_plant_encar, df_ppca_add])
df_plant_encar_1 = df_plant_encar_1.drop(columns=['nd_id','pt_id','pp'])
#df_plant_encar = df_ppca_add.loc[:, df_plant_encar.columns]
df_plant_encar_1 = df_plant_encar_1.set_index('pp_id')
for key, value in dict_pp_id_pho.items():
df_plant_encar_1.loc[value, 'pwr_pot'] = dfpv_arch.loc[key[0:-4],'pv_power_tot_est']
# TODO REPLACE INSTEAD OF UPDATE
df_plant_encar_new = df_plant_encar_1.reset_index()
# %% ~~~~~~~~~~~~~~~~~~~~ NODE_CONNECT
df_node_connect = pd.read_csv(data_path_prv + '/node_connect.csv')
# df_node_connect = aql.read_sql(db, sc, 'node_connect',
# filt=[('nd_id', df_def_node.nd_id.tolist(), ' != ', ' AND '),
# ('nd_2_id', df_def_node.nd_id.tolist(), ' != ', ' AND ')])
node_sfh = df_def_node.loc[df_def_node.nd.str.contains('SFH')].nd.values
node_mfh = df_def_node.loc[df_def_node.nd.str.contains('MFH')].nd.values
node_oco = df_def_node.loc[df_def_node.nd.str.contains('OCO')].nd.values
node_ind = df_def_node.loc[df_def_node.nd.str.contains('IND')].nd.values
#node_oth = df_def_node.loc[df_def_node.nd.str.contains('OTH')].nd.values
node_rur = df_def_node.loc[df_def_node.nd.str.contains('RUR')].nd.values
node_sub = df_def_node.loc[df_def_node.nd.str.contains('SUB')].nd.values
node_urb = df_def_node.loc[df_def_node.nd.str.contains('URB')].nd.values
#def get_cap_sllprc(sllprc):
# dict_nd_id = df_def_node.set_index('nd_id')['nd'].to_dict()
# df_cap = pd.merge(df_def_plant.loc[df_def_plant.pp.str.contains(sllprc)],
# df_plant_encar, on='pp_id', how='inner')
# df_cap['nd_id'] = df_cap.nd_id.replace(dict_nd_id)
# return df_cap.set_index('nd_id')['cap_pwr_leg']
def get_cap_sllprc(sllprc):
dict_nd_id = df_def_node.set_index('nd_id')['nd'].to_dict()
df_cap = pd.merge(df_def_plant.loc[df_def_plant.pp.str.contains(sllprc)],
df_plant_encar_new, on='pp_id', how='inner')
df_cap['nd_id'] = df_cap.nd_id.replace(dict_nd_id)
return df_cap.set_index('nd_id')['cap_pwr_leg']
df_cap = cap_prc.set_index('nd_id')['cap_pwr_leg']
df_cap_rur = df_cap[df_cap.index.str.contains('RUR')] * (1.05 + (1-(1-data['grid_losses'])**0.5))
df_cap_urb = df_cap[df_cap.index.str.contains('URB')] * (1.15 + (1-(1-data['grid_losses'])**0.5))
df_cap_sub = df_cap[df_cap.index.str.contains('SUB')] * (1.1 + (1-(1-data['grid_losses'])**0.5))
df_cap = pd.concat([df_cap_rur, df_cap_sub, df_cap_urb])
# external connection hh load+PV <-> grid
#data = dict(nd_id='CH0', nd_2_id=node_sfh, ca_id=0, mt_id='all')
data_sfh = dict(nd_id='CH0', nd_2_id=node_sfh, ca_id=0, mt_id='all')
data_mfh = dict(nd_id='CH0', nd_2_id=node_mfh, ca_id=0, mt_id='all')
data_oco = dict(nd_id='CH0', nd_2_id=node_oco, ca_id=0, mt_id='all')
data_ind = dict(nd_id='CH0', nd_2_id=node_ind, ca_id=0, mt_id='all')
#data_oth = dict(nd_id='CH0', nd_2_id=node_oth, ca_id=0, mt_id='all')
data_rur = dict(nd_id='CH0', nd_2_id=node_rur, ca_id=0, mt_id='all')
data_sub = dict(nd_id='CH0', nd_2_id=node_sub, ca_id=0, mt_id='all')
data_urb = dict(nd_id='CH0', nd_2_id=node_urb, ca_id=0, mt_id='all')
df_typ_gd = pd.concat([pd.DataFrame(data_rur), pd.DataFrame(data_sub),pd.DataFrame(data_urb)])
df_typ_gd = expand_rows(df_typ_gd, ['mt_id'], [range(12)])
df_typ_gd = pd.merge(df_typ_gd, df_cap.rename('cap_trmi_leg').reset_index(),
left_on='nd_2_id', right_on='nd_id', suffixes=('', '_temp')).drop('nd_id_temp', axis=1)
df_typ_gd = pd.merge(df_typ_gd, df_cap.rename('cap_trme_leg').reset_index(),
left_on='nd_2_id', right_on='nd_id', suffixes=('', '_temp')).drop('nd_id_temp', axis=1)
df_node_connect = df_typ_gd[df_node_connect.columns]
dft = pd.concat([df_def_node, pd.read_csv(data_path_prv + '/def_node.csv')])
for idx in [('nd'), (['nd', 'nd_2'])]:
df_node_connect, _ = translate_id(df_node_connect, dft, idx)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FUEL_NODE_ENCAR
df_fuel_node_encar_0 = pd.read_csv(data_path_prv + '/fuel_node_encar.csv')
# df_fuel_node_encar_0 = aql.read_sql(db, sc, 'fuel_node_encar')
df_flndca_add = df_ppca_add.copy()[['pp_id', 'ca_id']]
df_flndca_add = df_flndca_add.join(df_def_plant.set_index('pp_id')[['fl_id', 'nd_id']], on='pp_id')
df_flndca_add['nd_id'] = df_flndca_add.nd_id.replace(df_def_node.set_index('nd_id')['nd'].to_dict())
df_flndca_add['fl_id'] = df_flndca_add.fl_id.replace(df_def_fuel.set_index('fl_id')['fl'].to_dict())
df_flndca_add = df_flndca_add.drop('pp_id', axis=1).drop_duplicates()
# has_profile points to the nd_id for which the profile is defined
#df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricesll_pf_id'] = 'pricesll_electricity_CH0_15min'
#df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricebuy_pf_id'] = 'pricebuy_electricity_CH0_15min'
df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricesll_pf_id'] = 'pricesll_electricity_CH0_1h'
df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricebuy_pf_id'] = 'pricebuy_electricity_CH0_1h'
#df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricesll_pf_id'] = 'pricesll_electricity_CH0'
#df_flndca_add.loc[df_flndca_add.fl_id == 'electricity', 'pricebuy_pf_id'] = 'pricebuy_electricity_CH0'
df_fuel_node_encar = df_flndca_add.reindex(columns=df_fuel_node_encar_0.columns)
fill_cols = [c for c in df_fuel_node_encar.columns
if any(pat in c for pat in ['vc_fl', 'erg_inp'])]
df_fuel_node_encar[fill_cols] = df_fuel_node_encar[fill_cols].fillna(0)
for df, idx in [(df_def_fuel, 'fl'), (df_def_node, 'nd')]:
# (df_def_profile_prc, ['pf', 'pricesll_pf']),
# (df_def_profile_prc, ['pf', 'pricebuy_pf'])]:
df_fuel_node_encar, _ = translate_id(df_fuel_node_encar, df, idx)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DF_PROFPRICE
df_profprice = pd.read_csv(data_path_prv + '/profprice.csv')
# df_profprice = aql.read_sql(db, sc, 'profprice')
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DF_PROFPRICE
#list_pf = df_def_profile_0.loc[df_def_profile_0.pf.str.contains('pricebuy_electricity_CH0|pricesll_electricity_CH0')].pf_id.tolist()
#
#df_profprice_0 = aql.read_sql(db, sc, 'profprice', filt=[('price_pf_id', list_pf)])
#
#def expand(df):
#
# df_ret = pd.merge(df_profsupply[['hy']].drop_duplicates(), df,
# on='hy', how='outer')
# df_ret['price_pf_id'] = df_ret.price_pf_id.fillna(method='ffill')
#
# df_ret.value = df_ret.value.interpolate()
#
# return df_ret
#
#df_profprice = df_profprice_0.assign(hy=df_profprice_0.hy.astype(float)).groupby('price_pf_id').apply(expand).reset_index(drop=True)
#df_profprice = pd.merge(df_profprice, df_def_profile_0[['pf_id', 'pf']], left_on='price_pf_id', right_on='pf_id', how='left')
##df_profprice['pf'] = df_profprice.pf + '_15min'
#df_profprice['pf'] = df_profprice.pf + '_1h'
#df_profprice = df_profprice.drop(['pf_id', 'price_pf_id'], axis=1)
#df_profprice = df_profprice.join(df_def_profile_prc.set_index('pf').pf_id.rename('price_pf_id'), on='pf').drop('pf', axis=1)
#
#df_profprice = df_profprice[df_profprice_0.columns]
# %%
#df_node_encar_new
#df_profdmnd_new
#df_plant_encar_new
# list_tb_col = [
# (df_plant_encar_new, 'plant_encar', ['pp_id', 'ca_id'])
# ]
# list_tb_new = [
# (df_node_encar_new, 'node_encar', ['nd_id', 'ca_id']),
# (df_profdmnd_new, 'profdmnd', ['dmnd_pf_id']),
# (df_plant_encar_new, 'plant_encar', ['pp_id', 'ca_id']),
# ]
# list_tb = [(df_fuel_node_encar, 'fuel_node_encar', ['nd_id', 'fl_id', 'ca_id']),
# (df_node_encar_new, 'node_encar', ['nd_id', 'ca_id']),
# (df_profdmnd_new, 'profdmnd', ['dmnd_pf_id']),
# (df_profsupply, 'profsupply', ['supply_pf_id']),
# (df_profprice, 'profprice', ['price_pf_id']),
# (df_node_connect, 'node_connect', ['nd_id', 'nd_2_id']),
# (df_plant_encar_new, 'plant_encar', ['pp_id', 'ca_id']),
# (df_def_plant, 'def_plant', ['pp_id']),
# (df_def_node, 'def_node', ['nd_id']),
# # (df_def_fuel, 'def_fuel', ['fl_id']), <-- NO CHANGE
# (df_def_pp_type, 'def_pp_type', ['pt_id']),
# (df_def_profile, 'def_profile', ['pf_id'])
# ]
# # tables with foreign keys first
# #df, tb, ind = (df_def_plant, 'def_plant', ['pp_id'])
# df, tb = (df_plant_encar_new, 'plant_encar')
# append_new_cols(df, tb)
# #for df, tb, ind in list_tb_col:
# # print('Replacing table %s'%tb)
# # append_new_cols(df, tb)
# for df, tb, ind in list_tb:
# print('Deleting from table %s'%tb)
# del_new_rows(ind, tb, df)
# # tables with foreign keys last
# for df, tb, ind in reversed(list_tb):
# print('Appending to table %s'%tb)
# append_new_rows(df, tb)
# for tb in aql.get_sql_tables(sc, db):
# print(tb)
# df = aql.read_sql(db, sc, tb)
# if 'prof' in tb and 'value' in df.columns:
# df['value'] = df['value'].round(13)
# df.to_csv(os.path.join(data_path, '%s.csv'%tb), index=False)
list_tb_app = {'def_node':df_def_node,
'def_pp_type':df_def_pp_type,
'fuel_node_encar': df_fuel_node_encar,
'profsupply': df_profsupply,
'node_connect': df_node_connect,
'def_plant': df_def_plant,
'def_profile':df_def_profile,
}
list_tb_new = {'plant_encar' : df_plant_encar_new,
'node_encar' : df_node_encar_new,
'profdmnd' : df_profdmnd_new,
}
import glob
csv_files_previous = glob.glob(os.path.join(data_path_prv, "*.csv"))
for f in csv_files_previous:
# read the csv file
df_prv = | pd.read_csv(f) | pandas.read_csv |
import pandas as pd
import numpy as np
if __name__ == "__main__":
df = pd.read_csv('owid-covid-data.csv')
| pd.set_option("display.max_rows", None, "display.max_columns", None) | pandas.set_option |
#data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
#visualisation
import seaborn as sns
import matplotlib.pyplot as plt
#machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
results_format = pd.read_csv('gender_submission.csv')
combine = [train_df, test_df]
#print(train_df.columns.values)
#print(train_df.info())
#print(train_df.describe())
#A list of dtypes or strings to be included/excluded.
#To select all numeric types use numpy numpy.number.
#To select categorical objects use type object.
#See also the select_dtypes documentation. eg. df.describe(include=[‘O’])
#print(train_df.describe(include=['O']))
#df = train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False) # is a dataframe
#print(train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# as_index - For aggregated output, return object with group labels as the index.
# ascending
#print(train_df[['Sex','Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))
#print(train_df[['SibSp','Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False))
#print(train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived',ascending=False))
#g = sns.FacetGrid(train_df, col='Survived')
#g.map(plt.hist, 'Age', bins=20)
#grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
#grid.map(plt.hist, 'Age', alpha=.5, bins=20)
#grid.add_legend()
#Correlating categorical features
#grid = sns.FacetGrid(train_df, row='Embarked', size= 2.2, aspect=1.6)
#grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
#grid.add_legend()
#Correlating categorical and numerical features
#grid = sns.FacetGrid(train_df, col='Embarked', hue='Survived', palette={0: 'k', 1:'w'})
#grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size= 2.2, aspect=1.6)
#grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)
#grid.add_legend()
#Correcting by dropping features
#print("Before", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
#print("After", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
#convert cateogircal titles to ordinal
title_mapping = {"Mr":1, "Miss":2, "Mrs":3, "Master":4, "Rare":5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
#print(train_df.head())
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'],axis=1)
combine = [train_df, test_df]
#print(train_df.shape, test_df.shape)
#combine is of type list
for dataset in combine:
dataset['Sex'] = dataset['Sex'].map({'female':1, 'male':0}).astype(int)
train_df.head()
grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
#empty array to contain guessed age values based on Pclass X Gender combinations
guess_ages = np.zeros((2,3))
for dataset in combine:
for i in range(0,2):
for j in range(0,3):
guess_df = dataset[(dataset['Sex']==i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5) * 0.5
for i in range(0,2):
for j in range(0,3):
dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
#train_df.head()
#create age bands and determine correlations with survived
train_df['AgeBand'] = pd.cut(train_df['Age'],5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending = True)
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']
#train_df.head()
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
#train_df.head()
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
#train_df.head()
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
freq_port = train_df.Embarked.dropna().mode()[0]
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
#train_df.head()
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
#test_df.head()
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
#train_df.head(10)
#Model, predict and solve
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
print(X_test.head())
#print(X_train.shape, Y_train.shape, X_test.shape)
"""
#Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
"""
# K Nearest Neighbours
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
predicted_results = pd.DataFrame(Y_pred)
results_format.drop(['Survived'], axis=1, inplace=True)
predicted_results.columns = ['Survived']
combined_final = | pd.concat([results_format, predicted_results], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import random
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from pandasqt.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from pandasqt.models.DataSearch import DataSearch
from pandasqt.models.SupportedDtypes import SupportedDtypes
def test_initDataFrame():
model = DataFrameModel()
assert model.dataFrame().empty
def test_initDataFrameWithDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
def test_setDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel()
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
with pytest.raises(TypeError) as excinfo:
model.setDataFrame(None)
assert "pandas.core.frame.DataFrame" in unicode(excinfo.value)
@pytest.mark.parametrize(
"copy, operator",
[
(True, numpy.not_equal),
(False, numpy.equal)
]
)
def test_copyDataFrame(copy, operator):
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
model.setDataFrame(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
def test_TimestampFormat():
model = DataFrameModel()
assert model.timestampFormat == Qt.ISODate
newFormat = u"yy-MM-dd hh:mm"
model.timestampFormat = newFormat
assert model.timestampFormat == newFormat
with pytest.raises(TypeError) as excinfo:
model.timestampFormat = "yy-MM-dd hh:mm"
assert "unicode" in unicode(excinfo.value)
#def test_signalUpdate(qtbot):
#model = DataFrameModel()
#with qtbot.waitSignal(model.layoutAboutToBeChanged) as layoutAboutToBeChanged:
#model.signalUpdate()
#assert layoutAboutToBeChanged.signal_triggered
#with qtbot.waitSignal(model.layoutChanged) as blocker:
#model.signalUpdate()
#assert blocker.signal_triggered
@pytest.mark.parametrize(
"orientation, role, index, expectedHeader",
[
(Qt.Horizontal, Qt.EditRole, 0, None),
(Qt.Vertical, Qt.EditRole, 0, None),
(Qt.Horizontal, Qt.DisplayRole, 0, 'A'),
(Qt.Horizontal, Qt.DisplayRole, 1, None), # run into IndexError
(Qt.Vertical, Qt.DisplayRole, 0, 0),
(Qt.Vertical, Qt.DisplayRole, 1, 1)
]
)
def test_headerData(orientation, role, index, expectedHeader):
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.headerData(index, orientation, role) == expectedHeader
def test_flags():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
index = model.index(0, 0)
assert index.isValid()
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled
model.enableEditing(True)
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
model.setDataFrame(pandas.DataFrame([True], columns=['A']))
index = model.index(0, 0)
model.enableEditing(True)
assert model.flags(index) != Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
def test_rowCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.rowCount() == 1
model = DataFrameModel(pandas.DataFrame(numpy.arange(100), columns=['A']))
assert model.rowCount() == 100
def test_columnCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.columnCount() == 1
model = DataFrameModel( pandas.DataFrame(numpy.arange(100).reshape(1, 100), columns=numpy.arange(100)) )
assert model.columnCount() == 100
class TestSort(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.mark.parametrize(
"signal",
[
"layoutAboutToBeChanged",
"layoutChanged",
"sortingAboutToStart",
"sortingFinished",
]
)
def test_signals(self, model, qtbot, signal):
with qtbot.waitSignal(getattr(model, signal)) as blocker:
model.sort(0)
assert blocker.signal_triggered
def test_returnValues(self, model):
model.sort(0)
@pytest.mark.parametrize(
"testAscending, modelAscending, isIdentic",
[
(True, Qt.AscendingOrder, True),
(False, Qt.DescendingOrder, True),
(True, Qt.DescendingOrder, False),
]
)
def test_sort(self, model, dataFrame, testAscending, modelAscending, isIdentic):
temp = dataFrame.sort('A', ascending=testAscending)
model.sort(0, order=modelAscending)
assert (dataFrame['A'] == temp['A']).all() == isIdentic
class TestData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
index = model.index(0, 0)
assert index.isValid()
return index
def test_invalidIndex(self, model):
assert model.data(QtCore.QModelIndex()) is None
def test_unknownRole(self, model, index):
assert index.isValid()
assert model.data(index, role="unknownRole") == None
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == None
# with pytest.raises(TypeError) as excinfo:
# model.data(index)
# assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
(u"äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.1111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
assert model.data(index) == round(value, precision)
assert model.data(index, role=Qt.DisplayRole) == round(value, precision)
assert model.data(index, role=Qt.EditRole) == round(value, precision)
else:
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
#@<EMAIL>.parametrize(
#"border1, modifier, border2, dtype", [
#("min", -1, "max", numpy.uint8),
#("max", +1, "min", numpy.uint8),
#("min", -1, "max", numpy.uint16),
#("max", +1, "min", numpy.uint16),
#("min", -1, "max", numpy.uint32),
#("max", +1, "min", numpy.uint32),
#("min", -1, "max", numpy.uint64),
##("max", +1, "min", numpy.uint64), # will raise OverFlowError caused by astype function,
## uneffects models data method
#("min", -1, "max", numpy.int8),
#("max", +1, "min", numpy.int8),
#("min", -1, "max", numpy.int16),
#("max", +1, "min", numpy.int16),
#("min", -1, "max", numpy.int32),
#("max", +1, "min", numpy.int32),
##("min", -1, "max", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
##("max", +1, "min", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
#]
#)
#def test_integerBorderValues(self, model, index, border1, modifier, border2, dtype):
#ii = numpy.iinfo(dtype)
#dataFrame = pandas.DataFrame([getattr(ii, border1) + modifier], columns=['A'])
#dataFrame['A'] = dataFrame['A'].astype(dtype)
#model.setDataFrame(dataFrame)
#assert not model.dataFrame().empty
#assert model.dataFrame() is dataFrame
#assert index.isValid()
#assert model.data(index) == getattr(ii, border2)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
pandasDate = | pandas.Timestamp("1990-10-08T10:15:45") | pandas.Timestamp |
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import DateFlagsTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = classic_df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
exog = exog.pivot(index="timestamp", columns="segment")
exog = exog.reorder_levels([1, 0], axis=1)
exog = exog.sort_index(axis=1)
exog.columns.names = ["segment", "feature"]
exog.columns = pd.MultiIndex.from_arrays([["Moscow", "Omsk"], ["exog", "exog"]])
ts = TSDataset(df=df, df_exog=exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = | pd.date_range("2020-12-01", "2021-02-11") | pandas.date_range |
import os
import sys
import numpy as np
import datetime
import itertools
sys.path.append('C:\\Program Files\\Continuum\\Anaconda3\\Lib\\site-packages')
import matplotlib.pyplot as plt
import pandas as pd
#import library as mio
def get_data(symbols, dates):
#"""Read stock data (adjusted close) for given symbols from CSV files."""
df_final = pd.DataFrame(index=dates)
i=0
for symbol in symbols:
path = os.path.dirname(os.path.realpath(__file__))
print ("Loading csv..." + str(symbol))
file_path = path + "\\raw_data\\" + symbol + ".csv"
df_temp = pd.read_csv(file_path, parse_dates=True, index_col="Date",usecols=["Date", "Adj Close"], na_values=["nan"])
df_temp = df_temp.rename(columns={"Adj Close": symbol})
df_final = df_final.join(df_temp)
i+=1
if i == 1: # drop dates SPY did not trade
df_final = df_final.dropna(subset=[symbol])
return df_final
def write_Excel(__df_r1, filename):
print ("Printing Report...")
total_file = os.path.dirname(os.path.realpath(__file__)) + "\\raw_data\\" + filename
writer = | pd.ExcelWriter(total_file) | pandas.ExcelWriter |
import pandas as pd
import numpy as np
from impute_values import impute_values
from booleanize import booleanize
from one_hot import one_hot
skipped_reactors = [
"COMPASS", "MAST", "START",
"T10", "TEXTOR", "TUMAN3M"
]
def load_tokamaks():
tokamak_data = pd.read_csv("./tokamak_shots.csv", low_memory=False)
tokamak_data.columns = tokamak_data.columns.str.lower()
tokamak_data.columns = tokamak_data.columns.str.replace("-", "_")
for cur_key in tokamak_data.columns:
try:
tokamak_data[cur_key] = tokamak_data[cur_key].str.strip()
except:
continue
tokamak_data = tokamak_data[tokamak_data.phase.str.startswith("H")]
tokamak_data = tokamak_data[~tokamak_data.tok.isin(skipped_reactors)]
tokamak_data.date[tokamak_data.date % 100 == 0] = tokamak_data.date[tokamak_data.date % 100 == 0] + 1
tokamak_data["seldb3"] = tokamak_data.seldb3.astype(int)
tokamak_data["is_good"] = list(map(int,( tokamak_data.seldb3 == 1111111111 )))
drop_columns = [
"area", "bepdia", "bepmhd", "bgasa2", "bgasz2", "bmhdmdia",
"bsource", "bsource2", "coctr", "dalfdv", "dalfmp",
"db2p5", "db2p8", "db3is", "db3v5", "deltal", "deltau",
"divname", "dwdia", "dwmhd", "echloc", "echmode", "elmdur",
"elmfreq", "elmint", "elmmax", "enbi", "evap", "hmws2003",
"iae2000n", "iae2000x", "iaea92", "icanten", "icform",
"icscheme", "igradb", "iseq", "lcupdate", "lhtime", "ne0",
"ne0tsc", "nelform", "palpha", "pellet", "pfloss", "pinj2",
"plth", "premag", "rmag", "seldb1", "seldb2", "seldb2x",
"seldb3", "seldb3x", "seplim", "shot", "spin", "t1", "t2",
"taudia", "taumhd", "tauth1", "tauth2", "te0", "ti0",
"time_id", "tok_id", "torq", "tpi", "vsurf", "vtor0",
"vtorimp", "vtorv", "wdia", "wekin", "wficform", "wficrh",
"wficrhp", "wfpar", "wfper", "wikin", "wkin", "xgasa",
"xgasz", "zeff", "zeffneo",
"bgasa", "bgasz", "wfform",
"h89", "hiter96l", "h93", "hiter92y", "hipb98y3",
"heps97", "hipb98y", "hipb98y1", "hipb98y2", "hipb98y4",
"indent", "echfreq", "ieml", "pecrh", "picrh",
"dneldt", "icfreq"
]
tokamak_data.drop(columns=drop_columns, inplace=True)
tokamak_data.replace([np.inf, -np.inf], np.nan, inplace=True)
tokamak_data.dropna(axis=1, how="all", inplace=True)
force_numerics(tokamak_data)
force_drops(tokamak_data)
tok_set = set(tokamak_data.tok)
print(len(tok_set), "Tokamaks")
for tok in set(tokamak_data.tok):
dtp = pd.to_datetime(tokamak_data[tokamak_data.tok==tok].date, format='%Y%m%d')
print([tok,len(dtp),f"{np.min(dtp).year} - {np.max(dtp).year}"])
tokamak_data.drop(columns=["date"], inplace=True)
one_hot(tokamak_data)
booleanize(tokamak_data)
print_dataframe = pd.DataFrame()
print_dataframe["good"] = [
len(tokamak_data[tokamak_data.is_good & (tokamak_data.tok == "JET")]),
len(tokamak_data[tokamak_data.is_good & (tokamak_data.tok != "JET")]),
len(tokamak_data[tokamak_data.is_good])
]
print_dataframe["bad"] = [
len(tokamak_data[~tokamak_data.is_good & (tokamak_data.tok == "JET")]),
len(tokamak_data[~tokamak_data.is_good & (tokamak_data.tok != "JET")]),
len(tokamak_data[~tokamak_data.is_good])
]
print_dataframe.index = ["jet", "else", "total"]
print()
print(print_dataframe)
return tokamak_data
def force_numerics(tokamak_data):
numeric_keys = []
for cur_key in tokamak_data.columns:
try:
tokamak_data[cur_key] = tokamak_data[cur_key].str.strip()
except:
numeric_keys.append(cur_key)
string_keys = [
work_key for work_key in tokamak_data.columns if work_key not in numeric_keys
]
for cur_key in string_keys:
tmp_col = tokamak_data[cur_key].copy().dropna()
cur_output = [
cur_value for cur_value in set(tmp_col.values)
if cur_value != ''
]
is_int = False
is_flt = False
try:
cur_output = [int(str(x)) for x in cur_output]
is_int = True
except:
pass
if not is_int:
try:
cur_output = [float(str(x)) for x in cur_output]
is_flt = True
except:
pass
if not is_int and not is_flt: continue
tokamak_data.loc[tokamak_data[cur_key] == "", cur_key] = np.nan
if is_flt:
tokamak_data[cur_key] = [float(x) for x in tokamak_data[cur_key]]
continue
cur_list = []
for work_value in tokamak_data[cur_key]:
if | pd.isnull(work_value) | pandas.isnull |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/5 19:27
Desc: 新浪财经-交易日历
https://finance.sina.com.cn/realstock/company/klc_td_sh.txt
此处可以用来更新 calendar.json 文件,注意末尾没有 "," 号
"""
import datetime
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from akshare.stock.cons import hk_js_decode
def tool_trade_date_hist_sina() -> pd.DataFrame:
"""
交易日历-历史数据
https://finance.sina.com.cn/realstock/company/klc_td_sh.txt
:return: 交易日历
:rtype: pandas.DataFrame
"""
url = "https://finance.sina.com.cn/realstock/company/klc_td_sh.txt"
r = requests.get(url)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", r.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df.columns = ["trade_date"]
temp_df["trade_date"] = | pd.to_datetime(temp_df["trade_date"]) | pandas.to_datetime |
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.etf import etf_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "MOCK_PERIOD_1"),
("period2", "MOCK_PERIOD_2"),
("date", "MOCK_DATE"),
],
}
EMPTY_DF = pd.DataFrame()
DF_ETF = pd.DataFrame.from_dict(
data={
| pd.Timestamp("2020-11-30 00:00:00") | pandas.Timestamp |
import warnings
import pydot
import graphviz
# Take a look at the raw data :
import pandas as pd
from pandas import Series
from pandas import DataFrame
from pandas import read_csv
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
import matplotlib
# be able to save images on server
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from math import sqrt
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
import sys
import errno
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
import keras
from keras.layers import Input, Convolution1D, Dense, MaxPooling1D, Flatten, Conv2D
from keras.layers import LSTM
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
# be able to save images on server
# matplotlib.use('Agg')
import time
import datetime
from keras.models import load_model
import multiprocessing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide messy TensorFlow warnings
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" %
self.monitor, RuntimeWarning)
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
class RData:
def __init__(self, path, n_weeks=26):
self.path = path
self.data = {}
# load dataset
self.data['raw'] = self.load_data()
# config
self.n_weeks = n_weeks
self.n_features = int(len(self.data['raw'][0].columns))
print("number of features: {}".format(self.n_features))
# scale data
self.scaler = preprocessing.MinMaxScaler()
self.scale()
# reframe data
self.reframe()
# self.state_list_name = self.data.state.unique()
self.split_data()
# print(self.n_features)
# Return specific data
def __getitem__(self, index):
return self.data[index]
# load dataset
def load_data(self):
raw = | read_csv(self.path) | pandas.read_csv |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if | is_iterator(data) | pandas.core.dtypes.common.is_iterator |
#Import general modules
import argparse, requests, re
import html
import pandas as pd
from os.path import join, isdir
#Define constants
testRun = False
regionOutputFile = 'region_frequencies'
globalOutputFile = 'global_frequencies'
#Setup url and parameters
frequencyURL = 'http://www.allelefrequencies.net/hla6006a_scr.asp'
frequencyLoci = ['A', 'B', 'C', 'DRB1', 'DQA1', 'DQB1', 'DPA1', 'DPB1']
regions = ['Australia', 'Europe', 'North Africa', 'North America', 'North-East Asia', 'Oceania', 'South and Central America', 'South Asia', 'South-East Asia', 'Sub-Saharan Africa', 'Western Asia']
params = {'hla_region': None, 'hla_locus': None, 'hla_show': '%3E'}
#Request header to prevent 403 error
requestHeader = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
#Function to retrieve the frequencies from the allele frequencies website (http://www.allelefrequencies.net/).
#A tuple of dataframes is returned with all of the frequencies averaged per region and over all regions respectively.
#The samples sizes are also summed up. You can specify whether you want only 'positive', 'negative' or 'all' types of frequencies.
def retrieveFrequencies(regions, loci, hlaShow = 'all'):
#When hlaShow is '<' or '>' assign it to the request parameters
#when it is 'both' set it to None, otherwise throw an error
if hlaShow == 'negative':
params['hla_show'] = '='
elif hlaShow == 'positive':
params['hla_show'] = '>'
elif hlaShow == 'all':
params['hla_show'] = None
else:
raise ValueError("'hlaShow' should either be '<', '>' or 'both'.")
#Loop over ever region and locus to retrieve all allele frequencies
dfs = []
for region in regions:
for locus in loci:
print(f'Retrieving allele frequencies of locus {locus} from region {region}...')
#Retrieve the hmtl and parse it into a pandas dataframe
params['hla_locus'] = locus
params['hla_region'] = region
result = requests.get(frequencyURL, headers=requestHeader, params=params, timeout=None)
print(f'Frequencies succesfully retrieved from {result.url}')
df = pd.read_html(result.text, attrs={'class':'tblNormal'})[0]
#Filter the table to drop the 'Line' column (so it get not averaged)
#and remove any rows with Nan values in the allele frequency column
#df = df[df['Allele Frequency'] != 0]
df.drop(columns='Line', inplace=True)
df.dropna(axis = 0, how = 'any', subset = ['Allele Frequency'], inplace=True)
#Average the frequencies per allele per region
#Skip if no allele frequencies had been returned
try:
df = df.groupby('Allele').agg({'Allele Frequency': 'mean', 'Sample Size': 'sum'})
except pd.core.base.DataError:
continue
#Expand the allele index into a column
df.reset_index(level=['Allele'], inplace=True)
#Rename the 'Allele', 'Allele Frequency' and 'Sample Size' columns
df.rename(columns = {'Allele': 'allele', 'Allele Frequency': 'avg_frequency', 'Sample Size': 'total_sample_size'}, inplace=True)
#Add locus and region columns
df['locus'] = locus
df['region'] = region
#Append it to the list of dataframes
dfs.append(df)
#If this is a test run, break
if testRun:
break
if testRun:
break
#Also average the allele frequencies over all regions, so each regions weighs the same
regionMean = | pd.concat(dfs) | pandas.concat |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
CT Sizing class
This Python class contains methods and attributes specific for technology analysis within StorageVet.
"""
import cvxpy as cvx
from dervet.MicrogridDER.RotatingGeneratorSizing import RotatingGeneratorSizing
import pandas as pd
import storagevet.Library as Lib
import numpy as np
from storagevet.ErrorHandling import *
from dervet.DERVETParams import ParamsDER
class CT(RotatingGeneratorSizing):
""" An Combustion Turbine (CT) generator, with sizing optimization
"""
def __init__(self, params):
""" Initialize all technology with the following attributes.
Args:
params (dict): Dict of parameters for initialization
"""
TellUser.debug(f"Initializing {__name__}")
super().__init__(params)
self.tag = 'CT'
self.heat_rate = params['heat_rate'] # BTU/kWh
# time series inputs
self.natural_gas_price = params['natural_gas_price'] # $/MillionBTU
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that might have slipped in.
Update variable that hold timeseries data after adding growth data. These method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of loads in this simulation
"""
self.natural_gas_price = Lib.fill_extra_data(self.natural_gas_price, years, 0, frequency)
# TODO: change growth rate of fuel prices (user input?)
self.natural_gas_price = Lib.drop_extra_data(self.natural_gas_price, years)
def objective_function(self, mask, annuity_scalar=1):
costs = super().objective_function(mask, annuity_scalar)
total_out = self.variables_dict['elec'] + self.variables_dict['udis']
# natural gas fuel costs in $/kW
costs[self.name + ' naturalgas_fuel_cost'] = cvx.sum(cvx.multiply(total_out, self.heat_rate *
(self.natural_gas_price.loc[mask] * 1e6) * self.dt * annuity_scalar))
return costs
def timeseries_report(self):
""" Summaries the optimization results for this DER.
Returns: A timeseries dataframe with user-friendly column headers that summarize the results
pertaining to this instance
"""
tech_id = self.unique_tech_id()
results = super().timeseries_report()
results[tech_id + ' Natural Gas Price ($/MillionBTU)'] = self.natural_gas_price
return results
def update_price_signals(self, id_str, monthly_data=None, time_series_data=None):
""" Updates attributes related to price signals with new price signals that are saved in
the arguments of the method. Only updates the price signals that exist, and does not require all
price signals needed for this service.
Args:
monthly_data (DataFrame): monthly data after pre-processing
time_series_data (DataFrame): time series data after pre-processing
"""
if monthly_data is not None:
freq = self.natural_gas_price.freq
try:
self.natural_gas_price = ParamsDER.monthly_to_timeseries(freq, monthly_data.loc[:, [f"Natural Gas Price ($/MillionBTU)/{id_str}"]]),
except KeyError:
try:
self.natural_gas_price = ParamsDER.monthly_to_timeseries(freq, monthly_data.loc[:, [f"Natural Gas Price ($/MillionBTU)"]]),
except KeyError:
pass
def proforma_report(self, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame):
Returns: A DateFrame of with each year in opt_year as the index and
the corresponding value this stream provided.
"""
pro_forma = super().proforma_report(apply_inflation_rate_func, fill_forward_func, results)
tech_id = self.unique_tech_id()
fuel_col_name = tech_id + ' Natural Gas Costs'
elec = self.variables_df['elec']
analysis_years = self.variables_df.index.year.unique()
fuel_costs_df = pd.DataFrame()
for year in analysis_years:
elec_sub = elec.loc[elec.index.year == year]
# add diesel fuel costs in $/kW
fuel_costs_df.loc[ | pd.Period(year=year, freq='y') | pandas.Period |
# coding=utf-8
# Author: <NAME>
# Date: Jun 30, 2019
#
# Description: Indexes certain genes and exports their list.
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import argparse
from utils import ensurePathExists
from scipy.stats import ks_2samp
from itertools import combinations
import swifter
# Separating by At Least One Match
def select_by_at_least_one_match(ilist, keeplist):
# Only keep genes that are found in any of our gene list
return [i for i in ilist if i in keeplist]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=celltypes, help="Cell type. Defaults to spermatocyte")
parser.add_argument("--biotype", default='protein_coding', type=str, choices=['protein_coding'], help="Filter nodes by biotype (e.g., protein-coding)")
parser.add_argument("--attribute", default='TPM', type=str, help="Which attribute to plot. Defaults to 'TPM'.")
# parser.add_argument("--log", default=True, type=bool, help="Transforms attribute into log2(attribute).")
parser.add_argument("--minTPM", default=1, type=int, help="minLogTPM = math.log2(x). Defaults to 1.")
args = parser.parse_args()
celltype = args.celltype # spermatocyte or enterocyte
biotype = args.biotype
attribute = args.attribute
# log = args.log
minTPM = args.minTPM
print('Exporint {celltype:s}-{biotype:s}-{attribute:s}'.format(celltype=celltype, biotype=biotype, attribute=attribute))
print('Loading {celltype:s} Files'.format(celltype=celltype))
path = '../../02-core_genes/results/'
df_HS = pd.read_csv(path + 'FPKM/HS/HS-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
df_MM = pd.read_csv(path + 'FPKM/MM/MM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
df_DM = pd.read_csv(path + 'FPKM/DM/DM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
# Remove Duplicates
df_HS = df_HS.loc[~df_HS.index.duplicated(keep='first'), :]
df_MM = df_MM.loc[~df_MM.index.duplicated(keep='first'), :]
df_DM = df_DM.loc[~df_DM.index.duplicated(keep='first'), :]
# minTPM
if minTPM:
df_HS = df_HS.loc[(df_HS['TPM'] >= minTPM), :]
df_MM = df_MM.loc[(df_MM['TPM'] >= minTPM), :]
df_DM = df_DM.loc[(df_DM['TPM'] >= minTPM), :]
# Meta Genes
print('Loading {celltype:s} meta genes'.format(celltype=celltype))
dfM = pd.read_csv(path + 'meta-genes/meta-{celltype:s}-genes.csv.gz'.format(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
dfM['id_string_HS'] = dfM['id_string_HS'].apply(lambda x: x.split(',') if not | pd.isnull(x) | pandas.isnull |
import logging
from typing import Tuple
import pandas as pd
from pandas import DataFrame
from dbnd import task
from dbnd.testing.helpers_pytest import assert_run_task
from dbnd_test_scenarios.test_common.targets.target_test_base import TargetTestBase
logger = logging.getLogger(__name__)
@task(result=("features", "scores"))
def t_d_multiple_return(p: int) -> (DataFrame, int):
return pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]), 5
@task(result=("features", "scores"))
def t_d_multiple_tuple_return(p: int) -> Tuple[DataFrame, int]:
return | pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]) | pandas.DataFrame |
import pandas as pd
# create lookup tables for region codes and population sizes
abbreviations = ["DE",'DE-BW', 'DE-BY', 'DE-HB', 'DE-HH', 'DE-HE',
'DE-NI', 'DE-NW', 'DE-RP', 'DE-SL', 'DE-SH', 'DE-BB',
'DE-MV', 'DE-SN', 'DE-ST', 'DE-TH', 'DE-BE']
regions = ['Bundesgebiet', 'Baden-Württemberg', 'Bayern','Bremen', 'Hamburg','Hessen',
'Niedersachsen', 'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', "Schleswig-Holstein", "Brandenburg",
'Mecklenburg-Vorpommern', 'Sachsen', 'Sachsen-Anhalt', 'Thüringen', 'Berlin']
population = [83138368, 11097559, 13143271, 680058, 1855315, 6289594,
8020228, 17933411, 4115030, 985524, 2929662, 2522277,
1617005, 4088171, 2179946, 2120909,3663699]
region_dict = dict(zip(regions, abbreviations))
population_dict = dict(zip(abbreviations, population))
# create lookup table for fraction of age groups
age_groups = ["00+", "00-04", "05-14", "15-34", "35-59", "60-79", "80+"]
fractions = [1, 3954455/83138368, 7504156/83138368, 18915114/83138368,
28676427/83138368, 18150355/83138368, 5934038/83138368]
age_group_fractions = dict(zip(age_groups, fractions))
# get current date
date = pd.to_datetime('today').date()
url = f"https://gfx.sueddeutsche.de/storytelling-assets/datenteam/2021_corona-automation/hosp_incidence/" \
f"archive/{date}_hosp_incidence_nowcast_sz.csv"
# import csv file as a dataframe
df = pd.read_csv(url, sep=',', parse_dates=["Datum"] )
# remove rows with missing values
df.dropna(inplace = True)
# drop irrelevant columns
df.drop(columns = ["Bundesland_Id","offizielle Hospitalisierungsinzidenz","Obergrenze","Untergrenze"], inplace = True)
# rename locations according to submission guidelines
df.Bundesland.replace(region_dict, inplace = True)
# rename columns
df.rename(columns = {'Datum': 'target_end_date', 'Bundesland': 'location', 'Altersgruppe': 'age_group'}, inplace = True)
# rearrange in long format
df = | pd.melt(df, id_vars = ['target_end_date', 'location', 'age_group'], var_name = 'quantile') | pandas.melt |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.